diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml new file mode 100644 index 0000000000..b7b35d1207 --- /dev/null +++ b/.github/workflows/docker-antithesis.yml @@ -0,0 +1,31 @@ +name: docker antithesis + +on: + push: + branches: + - unstable + +env: + ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }} + ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }} + ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }} + REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }} + IMAGE_NAME: lighthouse + TAG: libvoidstar + +jobs: + build-docker: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: Update Rust + run: rustup update stable + - name: Dockerhub login + run: | + echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin + - name: Build AMD64 dockerfile (with push) + run: | + docker build \ + --tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \ + --file ./testing/antithesis/Dockerfile.libvoidstar . 
+ docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml new file mode 100644 index 0000000000..c23ee8df36 --- /dev/null +++ b/.github/workflows/linkcheck.yml @@ -0,0 +1,30 @@ +name: linkcheck + +on: + push: + branches: + - unstable + pull_request: + paths: + - 'book/**' + +jobs: + linkcheck: + name: Check broken links + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Create docker network + run: docker network create book + + - name: Run mdbook server + run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:latest serve --hostname 0.0.0.0 + + - name: Print logs + run: docker logs book + + - name: Run linkcheck + run: docker run --network book tennox/linkcheck:latest book:3000 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bca28dbe2a..4c57b8b1e7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -130,6 +130,19 @@ jobs: profile: minimal override: true + # ============================== + # Windows dependencies + # ============================== + + - uses: KyleMayes/install-llvm-action@v1 + if: startsWith(matrix.arch, 'x86_64-windows') + with: + version: "13.0" + directory: ${{ runner.temp }}/llvm + - name: Set LIBCLANG_PATH + if: startsWith(matrix.arch, 'x86_64-windows') + run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV + # ============================== # Builds # ============================== diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 95a2b8adfc..a4e49b1c26 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -12,7 +12,7 @@ env: # Deny warnings in CI RUSTFLAGS: "-D warnings" # The Nightly version used for cargo-udeps, might need updating from time to time. 
- PINNED_NIGHTLY: nightly-2021-06-09 + PINNED_NIGHTLY: nightly-2021-12-01 jobs: target-branch-check: name: target-branch-check @@ -54,6 +54,12 @@ jobs: run: npm install -g ganache-cli - name: Install make run: choco install -y make + - uses: KyleMayes/install-llvm-action@v1 + with: + version: "13.0" + directory: ${{ runner.temp }}/llvm + - name: Set LIBCLANG_PATH + run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - name: Run tests in release run: make test-release beacon-chain-tests: diff --git a/Cargo.lock b/Cargo.lock index c1aa1b7abb..b220100832 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -111,7 +111,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "once_cell", "version_check", ] @@ -130,15 +130,6 @@ name = "amcl" version = "0.3.0" source = "git+https://github.com/sigp/milagro_bls?tag=v1.4.2#16655aa033175a90c10ef02aa144e2835de23aec" -[[package]] -name = "ansi_term" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -dependencies = [ - "winapi", -] - [[package]] name = "ansi_term" version = "0.12.1" @@ -150,9 +141,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.45" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee10e43ae4a853c0a3591d4e2ada1719e553be18199d9da9d4a83f5927c2f5c7" +checksum = "84450d0b4a8bd1ba4144ce8ce718fbc5d071358b1e5384bace6536b3d1f2d5b3" [[package]] name = "arbitrary" @@ -195,9 +186,9 @@ checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote", @@ -214,7 +205,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", ] [[package]] @@ -291,16 +282,17 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" name = "beacon_chain" version = "0.2.0" dependencies = [ - "bitvec 0.19.5", + "bitvec 0.19.6", "bls", "derivative", "environment", "eth1", "eth2", "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz_types 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_types", + "execution_layer", "fork_choice", "futures", "genesis", @@ -318,6 +310,7 @@ dependencies = [ "rand 0.7.3", "rayon", "safe_arith", + "sensitive_url", "serde", "serde_derive", "slasher", @@ -328,16 +321,17 @@ dependencies = [ "state_processing", "store", "strum", + "superstruct", "task_executor", "tempfile", "tokio", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "types", ] [[package]] name = "beacon_node" -version = "2.0.1" +version = "2.1.0" dependencies = [ "beacon_chain", "clap", @@ -375,6 +369,25 @@ dependencies = [ "serde", ] +[[package]] +name = "bindgen" +version = "0.59.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", +] + [[package]] name = "bitflags" version = "1.2.1" @@ -393,9 +406,9 @@ dependencies = [ [[package]] name 
= "bitvec" -version = "0.19.5" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" +checksum = "55f93d0ef3363c364d5976646a38f04cf67cfe1d4c8d160cdea02cab2c116b33" dependencies = [ "funty", "radium 0.5.3", @@ -422,7 +435,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" dependencies = [ "crypto-mac 0.8.0", - "digest", + "digest 0.9.0", "opaque-debug", ] @@ -436,6 +449,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-buffer" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1d36a02058e76b040de25a4464ba1c80935655595b661505c8b39b664828b95" +dependencies = [ + "generic-array", +] + [[package]] name = "block-padding" version = "0.2.1" @@ -449,15 +471,15 @@ dependencies = [ "arbitrary", "blst", "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_serde_utils 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_serde_utils", + "eth2_ssz", "ethereum-types 0.12.1", "hex", "milagro_bls", "rand 0.7.3", "serde", "serde_derive", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "zeroize", ] @@ -475,12 +497,13 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.0.1" +version = "2.1.0" dependencies = [ "beacon_node", "clap", "clap_utils", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_network_config", + "eth2_ssz", "hex", "lighthouse_network", "log", @@ -527,9 +550,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.8.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "byte-slice-cast" @@ -581,14 +604,14 @@ name = "cached_tree_hash" version = "0.1.0" dependencies = [ "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz_types 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_types", "ethereum-types 0.12.1", "quickcheck", "quickcheck_macros", "smallvec", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", ] [[package]] @@ -606,6 +629,15 @@ version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom 7.1.0", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -660,12 +692,23 @@ dependencies = [ ] [[package]] -name = "clap" -version = "2.33.3" +name = "clang-sys" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" dependencies = [ - "ansi_term 0.11.0", + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "ansi_term", "atty", "bitflags", "strsim 0.8.0", @@ -681,7 +724,8 @@ dependencies = [ "clap", "dirs", 
"eth2_network_config", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", + "ethereum-types 0.12.1", "hex", ] @@ -697,6 +741,7 @@ dependencies = [ "eth1", "eth2", "eth2_config", + "execution_layer", "genesis", "http_api", "http_metrics", @@ -724,9 +769,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.46" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b858541263efe664aead4a5209a4ae5c5d2811167d4ed4ee0944503f8d2089" +checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" dependencies = [ "cc", ] @@ -762,6 +807,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279bc8fc53f788a75c7804af68237d1fce02cde1e275a886a4b320604dc2aeda" +[[package]] +name = "const-oid" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + [[package]] name = "convert_case" version = "0.4.0" @@ -804,9 +855,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" dependencies = [ "cfg-if", ] @@ -849,9 +900,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" dependencies = [ "cfg-if", "crossbeam-utils", @@ -870,9 +921,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" dependencies = [ "cfg-if", "crossbeam-utils", @@ -883,9 +934,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" +checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" dependencies = [ "cfg-if", "lazy_static", @@ -897,6 +948,27 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +dependencies = [ + "generic-array", + "rand_core 0.6.3", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d6b536309245c849479fba3da410962a43ed8e51c26b729208ec0ac2798d0" +dependencies = [ + "generic-array", +] + [[package]] name = "crypto-mac" version = "0.8.0" @@ -925,7 +997,7 @@ checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ "bstr", "csv-core", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] @@ -950,11 +1022,11 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377c9b002a72a0b2c1a18c62e2f3864bdfea4a015e3683a96e24aa45dd6c02d1" +checksum = "a19c6cedffdc8c03a3346d723eb20bd85a13362bb96dc2ac000842c6381ec7bf" dependencies = [ - "nix 0.22.2", + "nix 0.23.1", "winapi", ] @@ -965,7 +1037,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", - "digest", + "digest 0.9.0", "rand_core 0.5.1", "subtle", "zeroize", @@ -973,43 +1045,19 @@ dependencies = [ [[package]] name = "darling" -version = "0.12.4" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f2c43f534ea4b0b049015d00269734195e6d3f0f6635cb692251aca6f9f8b3c" +checksum = "d0d720b8683f8dd83c65155f0530560cba68cd2bf395f6513a483caee57ff7f4" dependencies = [ - "darling_core 0.12.4", - "darling_macro 0.12.4", -] - -[[package]] -name = "darling" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "757c0ded2af11d8e739c4daea1ac623dd1624b06c844cf3f5a39f1bdbd99bb12" -dependencies = [ - "darling_core 0.13.0", - "darling_macro 0.13.0", + "darling_core", + "darling_macro", ] [[package]] name = "darling_core" -version = "0.12.4" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e91455b86830a1c21799d94524df0845183fa55bafd9aa137b01c7d1065fa36" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn", -] - -[[package]] -name = "darling_core" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c34d8efb62d0c2d7f60ece80f75e5c63c1588ba68032740494b0b9a996466e3" +checksum = "7a340f241d2ceed1deb47ae36c4144b2707ec7dd0b649f894cb39bb595986324" dependencies = [ "fnv", "ident_case", @@ -1021,22 +1069,11 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.12.4" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29b5acf0dea37a7f66f7b25d2c5e93fd46f8f6968b1a5d7a3e02e97768afc95a" +checksum = "72c41b3b7352feb3211a0d743dc5700a4e3b60f51bd2b368892d1e0f9a95f44b" dependencies = [ - "darling_core 0.12.4", - "quote", - "syn", -] - -[[package]] -name = "darling_macro" -version = "0.13.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" -dependencies = [ - "darling_core 0.13.0", + "darling_core", "quote", "syn", ] @@ -1072,13 +1109,13 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" name = "deposit_contract" version = "0.2.0" dependencies = [ - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "ethabi 12.0.0", "hex", "reqwest", "serde_json", - "sha2", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.9.9", + "tree_hash", "types", ] @@ -1088,10 +1125,19 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eeb9d92785d1facb50567852ce75d0858630630e7eabea59cf7eb7474051087" dependencies = [ - "const-oid", + "const-oid 0.5.2", "typenum", ] +[[package]] +name = "der" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid 0.7.1", +] + [[package]] name = "derivative" version = "2.2.0" @@ -1116,14 +1162,14 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.16" +version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.3.3", + "rustc_version 0.4.0", "syn", ] @@ -1136,6 +1182,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "digest" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b697d66081d42af4fba142d56918a3cb21dc8eb63372c6b85d14f44fb9c5979b" +dependencies = [ + "block-buffer 0.10.0", + "crypto-common", + "generic-array", +] + [[package]] name = 
"directory" version = "0.1.0" @@ -1188,14 +1245,14 @@ dependencies = [ [[package]] name = "discv5" -version = "0.1.0-beta.11" +version = "0.1.0-beta.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4968631f2eb03ef8dff74fe355440bcf4bd1c514c4326325fc739640c4ec53" +checksum = "ed8f54486179d5a7f11e1f5526f49d925a411a96c1141a707bd5f071be2ab630" dependencies = [ "aes", "aes-gcm", "arrayvec 0.7.2", - "digest", + "digest 0.10.1", "enr", "fnv", "futures", @@ -1203,12 +1260,12 @@ dependencies = [ "hex", "hkdf", "lazy_static", - "libp2p-core 0.29.0", + "libp2p-core 0.30.0", "lru", "parking_lot", "rand 0.8.4", "rlp 0.5.1", - "sha2", + "sha2 0.9.9", "smallvec", "tokio", "tokio-stream", @@ -1231,17 +1288,29 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34d33b390ab82f2e1481e331dbd0530895640179d2128ef9a79cc690b78d1eba" dependencies = [ - "der", - "elliptic-curve", + "der 0.3.5", + "elliptic-curve 0.9.12", "hmac 0.11.0", "signature", ] [[package]] -name = "ed25519" -version = "1.2.0" +name = "ecdsa" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" +checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +dependencies = [ + "der 0.5.1", + "elliptic-curve 0.11.6", + "rfc6979", + "signature", +] + +[[package]] +name = "ed25519" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" dependencies = [ "signature", ] @@ -1256,7 +1325,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2", + "sha2 0.9.9", "zeroize", ] @@ -1270,9 +1339,10 @@ dependencies = [ "compare_fields", "compare_fields_derive", "derivative", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.12.1", + "fork_choice", "fs2", "hex", "rayon", @@ -1284,7 +1354,7 @@ dependencies = [ "state_processing", "store", "swap_or_not_shuffle", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "tree_hash_derive 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "types", ] @@ -1302,9 +1372,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" dependencies = [ "bitvec 0.20.4", - "ff", + "ff 0.9.0", "generic-array", - "group", + "group 0.9.0", "pkcs8", "rand_core 0.6.3", "subtle", @@ -1312,10 +1382,27 @@ dependencies = [ ] [[package]] -name = "encoding_rs" -version = "0.8.29" +name = "elliptic-curve" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a74ea89a0a1b98f6332de42c95baff457ada66d1cb4030f9ff151b2041a1c746" +checksum = "decb3a27ea454a5f23f96eb182af0671c12694d64ecc33dada74edd1301f6cfc" +dependencies = [ + "crypto-bigint", + "der 0.5.1", + "ff 0.11.0", + "generic-array", + "group 0.11.0", + "rand_core 0.6.3", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "encoding_rs" +version = "0.8.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ "cfg-if", ] @@ -1383,7 +1470,6 @@ dependencies = [ "eth2_config", "eth2_network_config", "exit-future", - "filesystem", "futures", "logging", "slog", @@ -1413,7 +1499,7 @@ dependencies = [ "environment", "eth1_test_rig", "eth2", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "fallback", "futures", @@ -1432,7 +1518,7 @@ dependencies = [ "task_executor", "tokio", "toml", - "tree_hash 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "types", "web3", ] @@ -1455,8 +1541,8 @@ dependencies = [ "account_utils", "bytes", "eth2_keystore", - "eth2_serde_utils 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_serde_utils", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures", "futures-util", @@ -1490,7 +1576,7 @@ dependencies = [ "lazy_static", "ring", "rustc-hex", - "sha2", + "sha2 0.9.9", "wasm-bindgen-test", ] @@ -1503,7 +1589,7 @@ dependencies = [ "cpufeatures 0.1.5", "lazy_static", "ring", - "sha2", + "sha2 0.9.9", ] [[package]] @@ -1529,7 +1615,7 @@ dependencies = [ "hex", "num-bigint-dig", "ring", - "sha2", + "sha2 0.9.9", "zeroize", ] @@ -1548,7 +1634,7 @@ dependencies = [ "serde", "serde_json", "serde_repr", - "sha2", + "sha2 0.9.9", "tempfile", "unicode-normalization", "uuid", @@ -1561,7 +1647,7 @@ version = "0.2.0" dependencies = [ "enr", "eth2_config", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "serde_yaml", "tempfile", "types", @@ -1570,49 +1656,29 @@ dependencies = [ [[package]] name = "eth2_serde_utils" -version = "0.1.0" +version = "0.1.1" dependencies = [ + "ethereum-types 0.12.1", "hex", "serde", "serde_derive", "serde_json", ] -[[package]] -name = "eth2_serde_utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "477fffc25490dfc866288273f96344c6879676a1337187fc39245cd422e10825" -dependencies = [ - "hex", - "serde", - "serde_derive", -] - [[package]] name = "eth2_ssz" -version = "0.4.0" +version = "0.4.1" dependencies = [ "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.12.1", "smallvec", ] -[[package]] -name = "eth2_ssz" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"948e343aa022785c07193f41ed37adfd9dd0350368060803b8302c7f798e8306" -dependencies = [ - "ethereum-types 0.12.1", - "smallvec", -] - [[package]] name = "eth2_ssz_derive" version = "0.3.0" dependencies = [ - "darling 0.13.0", + "darling", "proc-macro2", "quote", "syn", @@ -1624,7 +1690,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "635b86d2c941bb71e7419a571e1763d65c93e51a1bafc400352e3bef6ff59fc9" dependencies = [ - "darling 0.13.0", + "darling", "proc-macro2", "quote", "syn", @@ -1632,34 +1698,20 @@ dependencies = [ [[package]] name = "eth2_ssz_types" -version = "0.2.1" +version = "0.2.2" dependencies = [ "arbitrary", - "eth2_serde_utils 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "derivative", + "eth2_serde_utils", + "eth2_ssz", "serde", "serde_derive", "serde_json", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "tree_hash_derive 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "typenum", ] -[[package]] -name = "eth2_ssz_types" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9423ac7fb37037f828a32b724cdfa65ea62290055811731402a90fb8a5bcbb1" -dependencies = [ - "arbitrary", - "eth2_serde_utils 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde", - "serde_derive", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "typenum", -] - [[package]] name = "eth2_wallet" version = "0.1.0" @@ -1783,6 +1835,35 @@ dependencies = [ "uint 0.9.1", ] +[[package]] +name = "execution_layer" +version = "0.1.0" +dependencies = [ + "async-trait", + "bytes", + "environment", + "eth1", + "eth2_serde_utils", + "eth2_ssz_types", + "exit-future", + "futures", + "hex", + "lru", + "parking_lot", + "reqwest", + 
"sensitive_url", + "serde", + "serde_json", + "slog", + "slot_clock", + "task_executor", + "tokio", + "tree_hash", + "tree_hash_derive 0.4.0", + "types", + "warp 0.3.0", +] + [[package]] name = "exit-future" version = "0.2.0" @@ -1811,6 +1892,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" +dependencies = [ + "instant", +] + [[package]] name = "ff" version = "0.9.0" @@ -1822,6 +1912,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2958d04124b9f27f175eaeb9a9f383d026098aa837eadd8ba22c11f13a05b9e" +dependencies = [ + "rand_core 0.6.3", + "subtle", +] + [[package]] name = "ffi-opaque" version = "2.0.1" @@ -1872,15 +1972,9 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.2.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" - -[[package]] -name = "fixedbitset" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" +checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" [[package]] name = "flate2" @@ -1921,7 +2015,7 @@ name = "fork_choice" version = "0.1.0" dependencies = [ "beacon_chain", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "proto_array", "store", @@ -1956,9 +2050,9 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] 
name = "futures" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" dependencies = [ "futures-channel", "futures-core", @@ -1971,9 +2065,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" dependencies = [ "futures-core", "futures-sink", @@ -1981,15 +2075,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" dependencies = [ "futures-core", "futures-task", @@ -1999,18 +2093,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = 
"6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" dependencies = [ - "autocfg 1.0.1", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -2023,21 +2115,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d383f0425d991a05e564c2f3ec150bd6dde863179c131dd60d8aa73a05434461" dependencies = [ "futures-io", - "rustls 0.20.1", + "rustls 0.20.2", "webpki 0.22.0", ] [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" [[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" [[package]] name = "futures-timer" @@ -2047,11 +2139,10 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" dependencies = [ - "autocfg 1.0.1", "futures-channel", "futures-core", "futures-io", @@ -2059,18 +2150,16 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = 
"fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", "version_check", @@ -2084,7 +2173,7 @@ dependencies = [ "eth1", "eth1_test_rig", "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "futures", "int_to_bytes", "merkle_proof", @@ -2093,7 +2182,7 @@ dependencies = [ "slog", "state_processing", "tokio", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "types", ] @@ -2112,9 +2201,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" dependencies = [ "cfg-if", "libc", @@ -2171,16 +2260,27 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" dependencies = [ - "ff", + "ff 0.9.0", + "rand_core 0.6.3", + "subtle", +] + +[[package]] +name = "group" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" +dependencies = [ + "ff 0.11.0", "rand_core 0.6.3", "subtle", ] [[package]] name = "h2" -version = "0.3.7" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fd819562fcebdac5afc5c113c3ec36f902840b70fd4fc458799c8ce4607ae55" +checksum = "0c9de88456263e249e241fcd211d3954e2c9b0ef7ccfc235a444eb367cae3689" dependencies = [ "bytes", "fnv", @@ -2289,7 +2389,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01706d578d5c281058480e673ae4086a9f4710d8df1ad80a5b03e39ece5f886b" dependencies = [ - "digest", + "digest 
0.9.0", "hmac 0.11.0", ] @@ -2300,7 +2400,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ "crypto-mac 0.8.0", - "digest", + "digest 0.9.0", ] [[package]] @@ -2310,7 +2410,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac 0.11.1", - "digest", + "digest 0.9.0", ] [[package]] @@ -2319,7 +2419,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ - "digest", + "digest 0.9.0", "generic-array", "hmac 0.8.1", ] @@ -2337,13 +2437,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 1.0.1", ] [[package]] @@ -2354,7 +2454,7 @@ checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", ] [[package]] @@ -2366,7 +2466,7 @@ dependencies = [ "environment", "eth1", "eth2", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "futures", "hex", "lazy_static", @@ -2382,9 +2482,9 @@ dependencies = [ "store", "tokio", "tokio-stream", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "types", - "warp", + "warp 0.3.2", "warp_utils", ] @@ -2405,7 +2505,7 @@ dependencies = [ "store", "tokio", "types", - "warp", + "warp 0.3.2", "warp_utils", ] @@ -2417,9 +2517,9 @@ checksum = 
"acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "humantime" @@ -2429,9 +2529,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.15" +version = "0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436ec0091e4f20e655156a30a0df3770fe2900aa301e548e08446ec794b6953c" +checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ "bytes", "futures-channel", @@ -2442,8 +2542,8 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", - "pin-project-lite 0.2.7", + "itoa 0.4.8", + "pin-project-lite 0.2.8", "socket2 0.4.2", "tokio", "tower-service", @@ -2492,6 +2592,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "if-addrs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "if-addrs-sys" version = "0.3.2" @@ -2573,14 +2683,23 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg 1.0.1", "hashbrown", ] +[[package]] +name = "input_buffer" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" +dependencies = [ + "bytes", +] + [[package]] 
name = "instant" version = "0.1.12" @@ -2628,9 +2747,9 @@ checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itertools" -version = "0.10.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" +checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" dependencies = [ "either", ] @@ -2641,6 +2760,12 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + [[package]] name = "js-sys" version = "0.3.55" @@ -2672,9 +2797,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3e8e491ed22bc161583a1c77e42313672c483eba6bd9d7afec0f1131d0b9ce" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", - "sha2", + "ecdsa 0.11.1", + "elliptic-curve 0.9.12", + "sha2 0.9.9", ] [[package]] @@ -2692,9 +2817,15 @@ dependencies = [ "spin", ] +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "lcli" -version = "2.0.1" +version = "2.1.0" dependencies = [ "account_utils", "bls", @@ -2707,9 +2838,10 @@ dependencies = [ "eth1_test_rig", "eth2", "eth2_network_config", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_wallet", "genesis", + "int_to_bytes", "lighthouse_network", "lighthouse_version", "log", @@ -2718,7 +2850,7 @@ dependencies = [ "serde_json", "serde_yaml", "state_processing", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", 
"types", "validator_dir", "web3", @@ -2749,9 +2881,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.107" +version = "0.2.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" [[package]] name = "libflate" @@ -2773,6 +2905,16 @@ dependencies = [ "rle-decode-fast", ] +[[package]] +name = "libloading" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" +dependencies = [ + "cfg-if", + "winapi", +] + [[package]] name = "libm" version = "0.2.1" @@ -2780,25 +2922,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] -name = "libp2p" -version = "0.41.0" +name = "libmdbx" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "782229f90bf7d5b12ee3ee08f7e160ba99f0d75eee7d118d9c1a688b13f6e64a" +checksum = "c9a8a3723c12c5caa3f2a456b645063d1d8ffb1562895fa43746a999d205b0c6" +dependencies = [ + "bitflags", + "byteorder", + "derive_more", + "indexmap", + "libc", + "mdbx-sys", + "parking_lot", + "thiserror", +] + +[[package]] +name = "libp2p" +version = "0.42.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "atomic", "bytes", "futures", "futures-timer", - "getrandom 0.2.3", + "getrandom 0.2.4", "instant", "lazy_static", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", "libp2p-metrics", "libp2p-mplex", "libp2p-noise", + "libp2p-plaintext", "libp2p-swarm", "libp2p-swarm-derive", "libp2p-tcp", @@ -2806,45 +2964,11 @@ dependencies = [ "libp2p-yamux", "multiaddr", "parking_lot", - 
"pin-project 1.0.8", + "pin-project 1.0.10", "rand 0.7.3", "smallvec", ] -[[package]] -name = "libp2p-core" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af9b4abdeaa420593a297c8592f63fad4234f4b88dc9343b8fd8e736c35faa59" -dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek", - "either", - "fnv", - "futures", - "futures-timer", - "lazy_static", - "libsecp256k1 0.5.0", - "log", - "multiaddr", - "multihash", - "multistream-select", - "parking_lot", - "pin-project 1.0.8", - "prost 0.8.0", - "prost-build 0.8.0", - "rand 0.7.3", - "ring", - "rw-stream-sink", - "sha2", - "smallvec", - "thiserror", - "unsigned-varint 0.7.1", - "void", - "zeroize", -] - [[package]] name = "libp2p-core" version = "0.30.0" @@ -2863,15 +2987,50 @@ dependencies = [ "log", "multiaddr", "multihash", - "multistream-select", + "multistream-select 0.10.4", "parking_lot", - "pin-project 1.0.8", - "prost 0.9.0", - "prost-build 0.9.0", + "pin-project 1.0.10", + "prost", + "prost-build", "rand 0.8.4", "ring", "rw-stream-sink", - "sha2", + "sha2 0.9.9", + "smallvec", + "thiserror", + "unsigned-varint 0.7.1", + "void", + "zeroize", +] + +[[package]] +name = "libp2p-core" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" +dependencies = [ + "asn1_der", + "bs58", + "ed25519-dalek", + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "lazy_static", + "libsecp256k1 0.7.0", + "log", + "multiaddr", + "multihash", + "multistream-select 0.11.0", + "p256", + "parking_lot", + "pin-project 1.0.10", + "prost", + "prost-build", + "rand 0.8.4", + "ring", + "rw-stream-sink", + "sha2 0.10.1", "smallvec", "thiserror", "unsigned-varint 0.7.1", @@ -2881,12 +3040,11 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bb8f89d15cb6e3c5bc22afff7513b11bab7856f2872d3cfba86f7f63a06bc498" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "smallvec", "trust-dns-resolver", @@ -2894,9 +3052,8 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98942284cc1a91f24527a8b1e5bc06f7dd22fc6cee5be3d9bf5785bf902eb934" +version = "0.35.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asynchronous-codec", "base64 0.13.0", @@ -2907,44 +3064,42 @@ dependencies = [ "futures-timer", "hex_fmt", "instant", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-swarm", "log", "open-metrics-client", - "pin-project 1.0.8", - "prost 0.9.0", - "prost-build 0.9.0", + "pin-project 1.0.10", + "prost", + "prost-build", "rand 0.7.3", "regex", - "sha2", + "sha2 0.10.1", "smallvec", "unsigned-varint 0.7.1", ] [[package]] name = "libp2p-identify" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec6d59e3f88435a83797fc3734f18385f6f54e0fe081e12543573364687c7db5" +version = "0.33.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", "futures-timer", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-swarm", "log", "lru", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "smallvec", ] [[package]] name = "libp2p-metrics" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59f3be49edeecff13ef0d0dc28295ba4a33910611715f04236325d08e4119e0" +version = "0.3.0" +source = 
"git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -2953,14 +3108,13 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2cd64ef597f40e14bfce0497f50ecb63dd6d201c61796daeb4227078834fbf" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "nohash-hasher", "parking_lot", @@ -2971,37 +3125,51 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8772c7a99088221bb7ca9c5c0574bf55046a7ab4c319f3619b275f28c8fb87a" +version = "0.34.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "bytes", "curve25519-dalek", "futures", "lazy_static", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "rand 0.8.4", - "sha2", + "sha2 0.10.1", "snow", "static_assertions", "x25519-dalek", "zeroize", ] +[[package]] +name = "libp2p-plaintext" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "libp2p-core 0.31.0", + "log", + "prost", + "prost-build", + "unsigned-varint 0.7.1", + "void", +] + [[package]] name = "libp2p-swarm" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cb84d40627cd109bbbf43da9269d4ef75903f42356c88d98b2b55c47c430c792" +version = "0.33.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "either", "futures", "futures-timer", "instant", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "rand 0.7.3", "smallvec", @@ -3010,9 +3178,8 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd93a7dad9b61c39797572e4fb4fdba8415d6348b4e745b3d4cb008f84331ab" +version = "0.26.1" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "quote", "syn", @@ -3020,16 +3187,15 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7399c5b6361ef525d41c11fcf51635724f832baf5819b30d3d873eabb4fbae4b" +version = "0.31.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", "futures-timer", - "if-addrs", + "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "socket2 0.4.2", "tokio", @@ -3037,14 +3203,13 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa92005fbd67695715c821e1acfe4d7be9fd2d88738574e93d645c49ec2831c8" +version = "0.33.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "log", "quicksink", "rw-stream-sink", @@ -3055,36 +3220,16 @@ dependencies = [ [[package]] name = 
"libp2p-yamux" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7362abb8867d7187e7e93df17f460d554c997fc5c8ac57dc1259057f6889af" +version = "0.35.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" dependencies = [ "futures", - "libp2p-core 0.30.0", + "libp2p-core 0.31.0", "parking_lot", "thiserror", "yamux", ] -[[package]] -name = "libsecp256k1" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" -dependencies = [ - "arrayref", - "base64 0.12.3", - "digest", - "hmac-drbg", - "libsecp256k1-core 0.2.2", - "libsecp256k1-gen-ecmult 0.2.1", - "libsecp256k1-gen-genmult 0.2.1", - "rand 0.7.3", - "serde", - "sha2", - "typenum", -] - [[package]] name = "libsecp256k1" version = "0.6.0" @@ -3093,14 +3238,14 @@ checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" dependencies = [ "arrayref", "base64 0.12.3", - "digest", + "digest 0.9.0", "hmac-drbg", "libsecp256k1-core 0.2.2", "libsecp256k1-gen-ecmult 0.2.1", "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", "serde", - "sha2", + "sha2 0.9.9", "typenum", ] @@ -3112,14 +3257,14 @@ checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" dependencies = [ "arrayref", "base64 0.13.0", - "digest", + "digest 0.9.0", "hmac-drbg", "libsecp256k1-core 0.3.0", "libsecp256k1-gen-ecmult 0.3.0", "libsecp256k1-gen-genmult 0.3.0", "rand 0.8.4", "serde", - "sha2", + "sha2 0.9.9", "typenum", ] @@ -3130,7 +3275,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80" dependencies = [ "crunchy", - "digest", + "digest 0.9.0", "subtle", ] @@ -3141,7 +3286,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" dependencies = [ "crunchy", - "digest", + "digest 0.9.0", "subtle", ] @@ -3205,7 +3350,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.0.1" +version = "2.1.0" dependencies = [ "account_manager", "account_utils", @@ -3214,6 +3359,7 @@ dependencies = [ "boot_node", "clap", "clap_utils", + "directory", "env_logger 0.9.0", "environment", "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3252,9 +3398,9 @@ dependencies = [ "dirs", "discv5", "error-chain", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz_types 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_types", "exit-future", "fnv", "futures", @@ -3265,19 +3411,20 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru", + "open-metrics-client", "parking_lot", "rand 0.7.3", "regex", "serde", "serde_derive", - "sha2", + "sha2 0.9.9", "slog", "slog-async", "slog-term", "smallvec", "snap", "strum", - "superstruct 0.2.0", + "superstruct", "task_executor", "tempfile", "tiny-keccak 2.0.2", @@ -3286,6 +3433,7 @@ dependencies = [ "tokio-util", "types", "unsigned-varint 0.6.0", + "void", ] [[package]] @@ -3303,28 +3451,6 @@ version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" -[[package]] -name = "lmdb" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0908efb5d6496aa977d96f91413da2635a902e5e31dbef0bfb88986c248539" -dependencies = [ - "bitflags", - "libc", - "lmdb-sys", -] - -[[package]] -name = "lmdb-sys" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5b392838cfe8858e86fac37cf97a0e8c55cc60ba0a18365cadc33092f128ce9" -dependencies = 
[ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "lock_api" version = "0.4.5" @@ -3364,9 +3490,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.6.6" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" +checksum = "274353858935c992b13c0ca408752e2121da852d07dec7ce5f108c77dfa14d1f" dependencies = [ "hashbrown", ] @@ -3420,9 +3546,9 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" -version = "0.0.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata", ] @@ -3433,6 +3559,18 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +[[package]] +name = "mdbx-sys" +version = "0.11.4-git.20210105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b21b3e0def3a5c880f6388ed2e33b695097c6b0eca039dae6010527b059f8be1" +dependencies = [ + "bindgen", + "cc", + "cmake", + "libc", +] + [[package]] name = "memchr" version = "2.4.1" @@ -3441,9 +3579,9 @@ checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg 1.0.1", ] @@ -3478,11 +3616,11 @@ version = "0.1.0" dependencies = [ "derivative", "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "parking_lot", "serde", "smallvec", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "typenum", ] @@ -3502,6 +3640,12 @@ dependencies = [ "unicase", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.4.4" @@ -3578,10 +3722,10 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "752a61cd890ff691b4411423d23816d5866dd5621e4d1c5687a53b94b5a979d8" dependencies = [ - "digest", + "digest 0.9.0", "generic-array", "multihash-derive", - "sha2", + "sha2 0.9.9", "unsigned-varint 0.7.1", ] @@ -3605,6 +3749,24 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +[[package]] +name = "multipart" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050aeedc89243f5347c3e237e3e13dc76fbe4ae3742a57b94dc14f69acf76d4" +dependencies = [ + "buf_redux", + "httparse", + "log", + "mime", + "mime_guess", + "quick-error", + "rand 0.7.3", + "safemem", + "tempfile", + "twoway", +] + [[package]] name = "multipart" version = "0.18.0" @@ -3632,7 +3794,20 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.8", + "pin-project 1.0.10", + "smallvec", + "unsigned-varint 0.7.1", +] + +[[package]] +name = "multistream-select" +version = "0.11.0" +source = "git+https://github.com/libp2p/rust-libp2p?rev=17861d9cac121f7e448585a7f052d5eab4618826#17861d9cac121f7e448585a7f052d5eab4618826" +dependencies = [ + "bytes", + "futures", + "log", + "pin-project 1.0.10", "smallvec", "unsigned-varint 0.7.1", ] @@ -3662,15 +3837,15 @@ dependencies = [ "beacon_chain", "environment", "error-chain", 
- "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz_types 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", + "eth2_ssz_types", "exit-future", "fnv", "futures", "genesis", "hashset_delay", "hex", - "if-addrs", + "if-addrs 0.6.7", "igd", "itertools", "lazy_static", @@ -3712,9 +3887,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.22.2" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3bb9a13fa32bc5aeb64150cd3f32d6cf4c748f8f8a417cce5d2eb976a8370ba" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", "cc", @@ -3749,6 +3924,17 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" +[[package]] +name = "nom" +version = "7.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" +dependencies = [ + "memchr", + "minimal-lexical", + "version_check", +] + [[package]] name = "ntapi" version = "0.3.6" @@ -3820,9 +4006,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -3839,9 +4025,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = "oorandom" @@ -3857,12 +4043,12 @@ checksum = 
"624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "open-metrics-client" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7337d80c23c2d8b1349563981bc4fb531220733743ba8115454a67b181173f0d" +checksum = "9e224744b2e4da5b241857d2363a13bce60425f7b6ae2a5ff88d4d5557d9cc85" dependencies = [ "dtoa", - "itoa", + "itoa 0.4.8", "open-metrics-client-derive-text-encode", "owning_ref", ] @@ -3894,24 +4080,24 @@ dependencies = [ [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.0.2+3.0.0" +version = "111.17.0+1.1.1m" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14a760a11390b1a5daf72074d4f6ff1a6e772534ae191f999f57e9ee8146d1fb" +checksum = "05d6a336abd10814198f66e2a91ccd7336611f30334119ca8ce300536666fcf4" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.71" +version = "0.9.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7df13d165e607909b363a4757a6f133f8a818a74e9d3a98d09c6128e15fa4c73" +checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" dependencies = [ "autocfg 1.0.1", "cc", @@ -3927,7 +4113,7 @@ version = "0.2.0" dependencies = [ "beacon_chain", "derivative", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "itertools", "lazy_static", @@ -3950,6 +4136,18 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "p256" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d0e0c5310031b5d4528ac6534bccc1446c289ac45c47b277d5aa91089c5f74fa" +dependencies = [ + "ecdsa 0.13.4", + "elliptic-curve 0.11.6", + "sec1", + "sha2 0.9.9", +] + [[package]] name = "parity-scale-codec" version = "1.3.7" @@ -4037,6 +4235,12 @@ dependencies = [ "crypto-mac 0.11.1", ] +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + [[package]] name = "percent-encoding" version = "2.1.0" @@ -4052,49 +4256,39 @@ dependencies = [ "ucd-trie", ] -[[package]] -name = "petgraph" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" -dependencies = [ - "fixedbitset 0.2.0", - "indexmap", -] - [[package]] name = "petgraph" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" dependencies = [ - "fixedbitset 0.4.0", + "fixedbitset", "indexmap", ] [[package]] name = "pin-project" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "918192b5c59119d51e0cd221f4d49dde9112824ba717369e903c97d076083d0f" +checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" dependencies = [ - "pin-project-internal 0.4.28", + "pin-project-internal 0.4.29", ] [[package]] name = "pin-project" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ - "pin-project-internal 1.0.8", + "pin-project-internal 1.0.10", ] [[package]] name = "pin-project-internal" -version = "0.4.28" +version = "0.4.29" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e" +checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" dependencies = [ "proc-macro2", "quote", @@ -4103,9 +4297,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -4120,9 +4314,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -4136,15 +4330,15 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9c2f795bc591cb3384cb64082a578b89207ac92bb89c9d98c1ea2ace7cd8110" dependencies = [ - "der", + "der 0.3.5", "spki", ] [[package]] name = "pkg-config" -version = "0.3.22" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" +checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "platforms" @@ -4205,9 +4399,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = 
"primitive-types" @@ -4288,17 +4482,11 @@ version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.32" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ "unicode-xid", ] @@ -4311,7 +4499,7 @@ checksum = "6ab1427f3d2635891f842892dda177883dca0639e05fe66796a62c9d2f23b49c" dependencies = [ "byteorder", "libc", - "nom", + "nom 2.2.1", "rustc_version 0.2.3", ] @@ -4330,16 +4518,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "prost" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" -dependencies = [ - "bytes", - "prost-derive 0.8.0", -] - [[package]] name = "prost" version = "0.9.0" @@ -4347,25 +4525,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ "bytes", - "prost-derive 0.9.0", -] - -[[package]] -name = "prost-build" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" -dependencies = [ - "bytes", - "heck", - "itertools", - "log", - "multimap", - "petgraph 0.5.1", - "prost 0.8.0", - "prost-types 0.8.0", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -4380,27 +4540,14 @@ dependencies = [ "lazy_static", "log", "multimap", - "petgraph 0.6.0", - "prost 
0.9.0", - "prost-types 0.9.0", + "petgraph", + "prost", + "prost-types", "regex", "tempfile", "which", ] -[[package]] -name = "prost-derive" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.9.0" @@ -4414,16 +4561,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" -dependencies = [ - "bytes", - "prost 0.8.0", -] - [[package]] name = "prost-types" version = "0.9.0" @@ -4431,14 +4568,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ "bytes", - "prost 0.9.0", + "prost", ] [[package]] name = "proto_array" version = "0.2.0" dependencies = [ - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde", "serde_derive", @@ -4512,9 +4649,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.10" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" dependencies = [ "proc-macro2", ] @@ -4618,7 +4755,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", ] [[package]] @@ -4688,7 +4825,7 @@ version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "redox_syscall", ] @@ -4729,15 +4866,16 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d2927ca2f685faf0fc620ac4834690d29e7abb153add10f5812eef20b5e280" +checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" dependencies = [ "base64 0.13.0", "bytes", "encoding_rs", "futures-core", "futures-util", + "h2", "http", "http-body", "hyper", @@ -4749,12 +4887,13 @@ dependencies = [ "mime", "native-tls", "percent-encoding", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-util", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -4772,6 +4911,17 @@ dependencies = [ "quick-error", ] +[[package]] +name = "rfc6979" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +dependencies = [ + "crypto-bigint", + "hmac 0.11.0", + "zeroize", +] + [[package]] name = "ring" version = "0.16.20" @@ -4824,9 +4974,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.25.3" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57adcf67c8faaf96f3248c2a7b419a0dbc52ebe36ba83dd57fe83827c1ea4eb3" +checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152" dependencies = [ "bitflags", "fallible-iterator", @@ -4897,9 +5047,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac4581f0fc0e0efd529d069e8189ec7b90b8e7680e21beb35141bdc45f36040" +checksum = 
"d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" dependencies = [ "log", "ring", @@ -4909,9 +5059,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] name = "rw-stream-sink" @@ -4920,15 +5070,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ "futures", - "pin-project 0.4.28", + "pin-project 0.4.29", "static_assertions", ] [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "safe_arith" @@ -4998,7 +5148,7 @@ dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", "salsa20", - "sha2", + "sha2 0.9.9", ] [[package]] @@ -5021,6 +5171,18 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sec1" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +dependencies = [ + "der 0.5.1", + "generic-array", + "subtle", + "zeroize", +] + [[package]] name = "secp256k1" version = "0.20.3" @@ -5032,9 +5194,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "827cb7cce42533829c792fc51b82fbf18b125b45a702ef2c8be77fce65463a7b" +checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" dependencies = [ "cc", ] @@ -5111,13 +5273,23 @@ dependencies = [ [[package]] name = "serde" -version = 
"1.0.130" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" dependencies = [ "serde_derive", ] +[[package]] +name = "serde_array_query" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89c6e82b1005b33d5b2bbc47096800e5ad6b67ef5636f9c13ad29a6935734a7" +dependencies = [ + "serde", + "serde_urlencoded", +] + [[package]] name = "serde_cbor" version = "0.11.2" @@ -5130,9 +5302,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" dependencies = [ "proc-macro2", "quote", @@ -5141,11 +5313,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.70" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e277c495ac6cd1a01a58d0a0c574568b4d1ddf14f59965c6a58b8d96400b54f3" +checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" dependencies = [ - "itoa", + "itoa 1.0.1", "ryu", "serde", ] @@ -5168,19 +5340,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" dependencies = [ "form_urlencoded", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] [[package]] name = "serde_yaml" -version = "0.8.21" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c608a35705a5d3cdc9fbe403147647ff34b921f8e833e49306df898f9b20af" +checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" dependencies = [ - "dtoa", "indexmap", + "ryu", "serde", "yaml-rust", ] @@ 
-5191,34 +5363,45 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if", "cpufeatures 0.2.1", - "digest", + "digest 0.9.0", "opaque-debug", ] [[package]] name = "sha2" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if", "cpufeatures 0.2.1", - "digest", + "digest 0.9.0", "opaque-debug", ] +[[package]] +name = "sha2" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99c3bd8169c58782adad9290a9af5939994036b76187f7b4f0e6de91dbbfc0ec" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.1", + "digest 0.10.1", +] + [[package]] name = "sha3" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer", - "digest", + "block-buffer 0.9.0", + "digest 0.9.0", "keccak", "opaque-debug", ] @@ -5232,6 +5415,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -5247,7 +5436,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2807892cfa58e081aa1f1111391c7a0649d4fa127a4ffbe34bcbfb35a1171a4" dependencies = [ - "digest", + "digest 0.9.0", "rand_core 0.6.3", ] @@ -5280,15 +5469,15 @@ version = "0.1.0" dependencies = [ "bincode", "byteorder", - "eth2_ssz 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "filesystem", "flate2", "lazy_static", + "libmdbx", "lighthouse_metrics", - "lmdb", - "lmdb-sys", "logging", + "lru", "maplit", "parking_lot", "rand 0.7.3", @@ -5299,7 +5488,7 @@ dependencies = [ "slog", "sloggers", "tempfile", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "tree_hash_derive 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "types", ] @@ -5325,7 +5514,8 @@ dependencies = [ name = "slashing_protection" version = "0.1.0" dependencies = [ - "eth2_serde_utils 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "arbitrary", + "eth2_serde_utils", "filesystem", "lazy_static", "r2d2", @@ -5428,6 +5618,7 @@ dependencies = [ "serde", "slog", "slog-async", + "slog-json", "slog-kvfilter", "slog-scope", "slog-stdlog", @@ -5449,9 +5640,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "snap" @@ -5472,7 +5663,7 @@ dependencies = [ "rand_core 0.6.3", "ring", "rustc_version 0.3.3", - "sha2", + "sha2 0.9.9", "subtle", "x25519-dalek", ] @@ -5541,7 +5732,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dae7e047abc519c96350e9484a96c6bf1492348af912fd3446dd2dc323f6268" dependencies = [ - "der", + "der 0.3.5", ] [[package]] @@ -5559,8 +5750,8 @@ dependencies = [ "bls", "env_logger 0.9.0", "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz_types 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + 
"eth2_ssz", + "eth2_ssz_types", "int_to_bytes", "integer-sqrt", "itertools", @@ -5570,7 +5761,7 @@ dependencies = [ "rayon", "safe_arith", "smallvec", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "types", ] @@ -5579,7 +5770,7 @@ name = "state_transition_vectors" version = "0.1.0" dependencies = [ "beacon_chain", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "lazy_static", "state_processing", "types", @@ -5598,7 +5789,7 @@ dependencies = [ "beacon_chain", "db-key", "directory", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "itertools", "lazy_static", @@ -5612,7 +5803,7 @@ dependencies = [ "sloggers", "state_processing", "tempfile", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "types", ] @@ -5657,22 +5848,11 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "superstruct" -version = "0.1.0" -dependencies = [ - "darling 0.13.0", - "itertools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "superstruct" -version = "0.2.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf7f6700d7c135cf4e4900c2cfba9a12ecad1fdc45594aad48f6b344b2589a0" +checksum = "4e623e69a04a6352677c1f892027e14e034dfc6c4aabed0a4a0be9c1a0a46cee" dependencies = [ - "darling 0.12.4", + "darling", "itertools", "proc-macro2", "quote", @@ -5690,9 +5870,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.81" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" +checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" dependencies = [ "proc-macro2", "quote", @@ -5750,13 +5930,13 @@ dependencies = [ 
[[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ "cfg-if", + "fastrand", "libc", - "rand 0.8.4", "redox_syscall", "remove_dir_all", "winapi", @@ -5887,7 +6067,7 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2", + "sha2 0.9.9", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -5939,11 +6119,10 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70e992e41e0d2fb9f755b37446f20900f64446ef54874f40a60c78f021ac6144" +checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" dependencies = [ - "autocfg 1.0.1", "bytes", "libc", "memchr", @@ -5951,7 +6130,7 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "signal-hook-registry", "tokio-macros", "winapi", @@ -5959,19 +6138,19 @@ dependencies = [ [[package]] name = "tokio-io-timeout" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90c49f106be240de154571dd31fbe48acb10ba6c6dd6f6517ad603abffa42de9" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "tokio", ] [[package]] name = "tokio-macros" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9efc1aba077437943f7515666aa2b882dfabfbfdf89c819ea75a8d6e9eaba5e" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", @@ -6006,11 +6185,24 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "tokio", "tokio-util", ] +[[package]] +name = "tokio-tungstenite" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" +dependencies = [ + "futures-util", + "log", + "pin-project 1.0.10", + "tokio", + "tungstenite 0.12.0", +] + [[package]] name = "tokio-tungstenite" version = "0.15.0" @@ -6019,9 +6211,9 @@ checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ "futures-util", "log", - "pin-project 1.0.8", + "pin-project 1.0.10", "tokio", - "tungstenite", + "tungstenite 0.14.0", ] [[package]] @@ -6035,7 +6227,7 @@ dependencies = [ "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "slab", "tokio", ] @@ -6063,7 +6255,7 @@ checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.8", "tracing-attributes", "tracing-core", ] @@ -6088,6 +6280,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project 1.0.10", + "tracing", +] + [[package]] name = "tracing-log" version = "0.1.2" @@ -6099,36 +6301,22 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" -dependencies = [ - "serde", - "tracing-core", -] - [[package]] name = "tracing-subscriber" -version = "0.2.25" 
+version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +checksum = "5d81bfa81424cc98cb034b837c985b7a290f592e5b4322f353f94a0ab0f9f594" dependencies = [ - "ansi_term 0.12.1", - "chrono", + "ansi_term", "lazy_static", "matchers", "regex", - "serde", - "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", - "tracing-serde", ] [[package]] @@ -6152,11 +6340,11 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.4.0" +version = "0.4.1" dependencies = [ "beacon_chain", "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.12.1", "rand 0.7.3", @@ -6165,22 +6353,11 @@ dependencies = [ "types", ] -[[package]] -name = "tree_hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9c8a86fad3169a65aad2265d3c6a8bc119d0b771046af3c1b2fb0e9b12182b" -dependencies = [ - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ethereum-types 0.12.1", - "smallvec", -] - [[package]] name = "tree_hash_derive" version = "0.4.0" dependencies = [ - "darling 0.13.0", + "darling", "quote", "syn", ] @@ -6191,7 +6368,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3cd22d128157837a4434bb51119aef11103f17bfe8c402ce688cf25aa1e608ad" dependencies = [ - "darling 0.13.0", + "darling", "quote", "syn", ] @@ -6247,6 +6424,25 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "tungstenite" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +dependencies = [ + "base64 0.13.0", + "byteorder", + "bytes", + "http", + "httparse", + "input_buffer", + "log", + "rand 0.8.4", + "sha-1", + "url", + "utf-8", +] + [[package]] name = "tungstenite" version = "0.14.0" @@ -6277,9 +6473,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "types" @@ -6295,10 +6491,10 @@ dependencies = [ "derivative", "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "eth2_interop_keypairs", - "eth2_serde_utils 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_serde_utils", + "eth2_ssz", "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_ssz_types 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_types", "ethereum-types 0.12.1", "hex", "int_to_bytes", @@ -6315,14 +6511,15 @@ dependencies = [ "safe_arith", "serde", "serde_derive", + "serde_json", "serde_yaml", "slog", "state_processing", - "superstruct 0.1.0", + "superstruct", "swap_or_not_shuffle", "tempfile", "test_random_derive", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "tree_hash_derive 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -6465,7 +6662,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "serde", ] @@ -6484,7 +6681,7 @@ dependencies = [ "environment", "eth2", "eth2_keystore", - 
"eth2_serde_utils 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_serde_utils", "exit-future", "filesystem", "futures", @@ -6512,11 +6709,11 @@ dependencies = [ "task_executor", "tempfile", "tokio", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "types", "url", "validator_dir", - "warp", + "warp 0.3.2", "warp_utils", ] @@ -6533,7 +6730,7 @@ dependencies = [ "lockfile", "rand 0.7.3", "tempfile", - "tree_hash 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tree_hash", "types", ] @@ -6551,9 +6748,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "void" @@ -6582,6 +6779,36 @@ dependencies = [ "try-lock", ] +[[package]] +name = "warp" +version = "0.3.0" +source = "git+https://github.com/macladson/warp?rev=dfa259e#dfa259e19b7490e6bc4bf247e8b76f671d29a0eb" +dependencies = [ + "bytes", + "futures", + "headers", + "http", + "hyper", + "log", + "mime", + "mime_guess", + "multipart 0.17.1", + "percent-encoding", + "pin-project 1.0.10", + "scoped-tls", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-rustls", + "tokio-stream", + "tokio-tungstenite 0.13.0", + "tokio-util", + "tower-service", + "tracing", + "tracing-futures", +] + [[package]] name = "warp" version = "0.3.2" @@ -6596,9 +6823,9 @@ dependencies = [ "log", "mime", "mime_guess", - "multipart", + "multipart 0.18.0", "percent-encoding", - "pin-project 1.0.8", + "pin-project 1.0.10", "scoped-tls", "serde", "serde_json", @@ -6606,7 +6833,7 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-stream", - "tokio-tungstenite", + "tokio-tungstenite 0.15.0", "tokio-util", 
"tower-service", "tracing", @@ -6623,10 +6850,11 @@ dependencies = [ "lighthouse_metrics", "safe_arith", "serde", + "serde_array_query", "state_processing", "tokio", "types", - "warp", + "warp 0.3.2", ] [[package]] @@ -6760,7 +6988,7 @@ dependencies = [ "jsonrpc-core", "log", "parking_lot", - "pin-project 1.0.8", + "pin-project 1.0.10", "reqwest", "rlp 0.5.1", "secp256k1", @@ -6833,9 +7061,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c475786c6f47219345717a043a37ec04cb4bc185e28853adcc4fa0a947eba630" +checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" dependencies = [ "webpki 0.22.0", ] diff --git a/Cargo.toml b/Cargo.toml index b005ce1c19..d27c1dc132 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,7 @@ members = [ "beacon_node/client", "beacon_node/eth1", "beacon_node/lighthouse_network", + "beacon_node/execution_layer", "beacon_node/http_api", "beacon_node/http_metrics", "beacon_node/network", @@ -87,3 +88,7 @@ members = [ [patch.crates-io] fixed-hash = { git = "https://github.com/paritytech/parity-common", rev="df638ab0885293d21d656dc300d39236b69ce57d" } warp = { git = "https://github.com/macladson/warp", rev ="7e75acc" } +eth2_ssz = { path = "consensus/ssz" } +eth2_ssz_types = { path = "consensus/ssz_types" } +tree_hash = { path = "consensus/tree_hash" } +eth2_serde_utils = { path = "consensus/serde_utils" } diff --git a/Cross.toml b/Cross.toml index 050f2bdbd7..2db3992464 100644 --- a/Cross.toml +++ b/Cross.toml @@ -2,3 +2,14 @@ passthrough = [ "RUSTFLAGS", ] + +# These custom images are required to work around the lack of Clang in the default `cross` images. +# We need Clang to run `bindgen` for MDBX, and the `BINDGEN_EXTRA_CLANG_ARGS` flags must also be set +# while cross-compiling for ARM to prevent bindgen from attempting to include headers from the host. 
+# +# For more information see https://github.com/rust-embedded/cross/pull/608 +[target.x86_64-unknown-linux-gnu] +image = "michaelsproul/cross-clang:x86_64-latest" + +[target.aarch64-unknown-linux-gnu] +image = "michaelsproul/cross-clang:aarch64-latest" diff --git a/Dockerfile b/Dockerfile index f8475012e3..5ca8cbc964 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,11 @@ -FROM rust:1.53.0 AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake +FROM rust:1.56.1-bullseye AS builder +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE RUN cd lighthouse && make -FROM debian:buster-slim +FROM ubuntu:latest RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ libssl-dev \ ca-certificates \ diff --git a/Dockerfile.cross b/Dockerfile.cross index 17402b4400..c8bd868878 100644 --- a/Dockerfile.cross +++ b/Dockerfile.cross @@ -1,7 +1,7 @@ # This image is meant to enable cross-architecture builds. # It assumes the lighthouse binary has already been # compiled for `$TARGETPLATFORM` and moved to `./bin`. -FROM --platform=$TARGETPLATFORM debian:buster-slim +FROM --platform=$TARGETPLATFORM ubuntu:latest RUN apt-get update && apt-get install -y --no-install-recommends \ libssl-dev \ ca-certificates \ diff --git a/Makefile b/Makefile index 6856635ebd..a4b880b806 100644 --- a/Makefile +++ b/Makefile @@ -144,6 +144,7 @@ test-full: cargo-fmt test-release test-debug test-ef # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. 
lint: cargo clippy --workspace --tests -- \ + -D clippy::fn_to_numeric_cast_any \ -D warnings \ -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ @@ -157,9 +158,10 @@ lint: make-ef-tests: make -C $(EF_TESTS) -# Verifies that state_processing feature arbitrary-fuzz will compile +# Verifies that crates compile with fuzzing features enabled arbitrary-fuzz: - cargo check --manifest-path=consensus/state_processing/Cargo.toml --features arbitrary-fuzz + cargo check -p state_processing --features arbitrary-fuzz + cargo check -p slashing_protection --features arbitrary-fuzz # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: diff --git a/README.md b/README.md index 8c53675234..00900b8c3d 100644 --- a/README.md +++ b/README.md @@ -66,8 +66,7 @@ of the Lighthouse book. ## Contact The best place for discussion is the [Lighthouse Discord -server](https://discord.gg/cyAszAh). Alternatively, you may use the -[sigp/lighthouse gitter](https://gitter.im/sigp/lighthouse). +server](https://discord.gg/cyAszAh). Sign up to the [Lighthouse Development Updates](http://eepurl.com/dh9Lvb) mailing list for email notifications about releases, network status and other important information. 
diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 738cbf16f0..221c31caf6 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -84,8 +84,8 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< Timeouts::set_all(Duration::from_secs(env.eth2_config.spec.seconds_per_slot)), ); - let testnet_config = env - .testnet + let eth2_network_config = env + .eth2_network_config .clone() .expect("network should have a valid config"); @@ -95,7 +95,7 @@ pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result< &client, &spec, stdin_inputs, - &testnet_config, + ð2_network_config, no_wait, ))?; @@ -109,11 +109,11 @@ async fn publish_voluntary_exit( client: &BeaconNodeHttpClient, spec: &ChainSpec, stdin_inputs: bool, - testnet_config: &Eth2NetworkConfig, + eth2_network_config: &Eth2NetworkConfig, no_wait: bool, ) -> Result<(), String> { let genesis_data = get_geneisis_data(client).await?; - let testnet_genesis_root = testnet_config + let testnet_genesis_root = eth2_network_config .beacon_state::() .as_ref() .expect("network should have valid genesis state") diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 6eb7911139..f43dfcdb8f 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -1,4 +1,5 @@ use crate::wallet::create::{PASSWORD_FLAG, STDIN_INPUTS_FLAG}; +use account_utils::validator_definitions::SigningDefinition; use account_utils::{ eth2_keystore::Keystore, read_password_from_user, @@ -208,10 +209,35 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin } }; + let voting_pubkey = keystore + .public_key() + .ok_or_else(|| format!("Keystore public key is invalid: {}", keystore.pubkey()))?; + // The keystore is placed in a directory that matches the name of the public key. 
This // provides some loose protection against adding the same keystore twice. let dest_dir = validator_dir.join(format!("0x{}", keystore.pubkey())); if dest_dir.exists() { + // Check if we should update password for existing validator in case if it was provided via reimport: #2854 + let old_validator_def_opt = defs + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == voting_pubkey); + if let Some(ValidatorDefinition { + signing_definition: + SigningDefinition::LocalKeystore { + voting_keystore_password: ref mut old_passwd, + .. + }, + .. + }) = old_validator_def_opt + { + if old_passwd.is_none() && password_opt.is_some() { + *old_passwd = password_opt; + defs.save(&validator_dir) + .map_err(|e| format!("Unable to save {}: {:?}", CONFIG_FILENAME, e))?; + eprintln!("Password updated for public key {}", voting_pubkey); + } + } eprintln!( "Skipping import of keystore for existing public key: {:?}", src_keystore @@ -234,9 +260,6 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin .map_err(|e| format!("Unable to copy keystore: {:?}", e))?; // Register with slashing protection. 
- let voting_pubkey = keystore - .public_key() - .ok_or_else(|| format!("Keystore public key is invalid: {}", keystore.pubkey()))?; slashing_protection .register_validator(voting_pubkey.compress()) .map_err(|e| { diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index 67902b7d29..e56a70472c 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -82,11 +82,11 @@ pub fn cli_run( ) -> Result<(), String> { let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME); - let testnet_config = env - .testnet + let eth2_network_config = env + .eth2_network_config .ok_or("Unable to get testnet configuration from the environment")?; - let genesis_validators_root = testnet_config + let genesis_validators_root = eth2_network_config .beacon_state::() .map(|state: BeaconState| state.genesis_validators_root()) .map_err(|e| { diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 80f9182efe..eecef0349e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.0.1" +version = "2.1.0" authors = ["Paul Hauner ", "Age Manning ", "Age Manning "] edition = "2018" +autotests = false # using a single test binary compiles faster [features] default = ["participation_metrics"] @@ -26,14 +27,14 @@ rayon = "1.4.1" serde = "1.0.116" serde_derive = "1.0.116" slog = { version = "2.5.2", features = ["max_level_trace"] } -sloggers = "2.0.2" +sloggers = { version = "2.1.1", features = ["json"] } slot_clock = { path = "../../common/slot_clock" } eth2_hashing = "0.2.0" -eth2_ssz = "0.4.0" -eth2_ssz_types = "0.2.1" +eth2_ssz = "0.4.1" +eth2_ssz_types = "0.2.2" eth2_ssz_derive = "0.3.0" state_processing = { path = "../../consensus/state_processing" } -tree_hash = "0.4.0" +tree_hash = "0.4.1" types = { path = "../../consensus/types" } tokio = "1.14.0" eth1 = 
{ path = "../eth1" } @@ -42,7 +43,7 @@ genesis = { path = "../genesis" } int_to_bytes = { path = "../../consensus/int_to_bytes" } rand = "0.7.3" proto_array = { path = "../../consensus/proto_array" } -lru = "0.6.0" +lru = "0.7.1" tempfile = "3.1.0" bitvec = "0.19.3" bls = { path = "../../crypto/bls" } @@ -55,3 +56,10 @@ slasher = { path = "../../slasher" } eth2 = { path = "../../common/eth2" } strum = { version = "0.21.0", features = ["derive"] } logging = { path = "../../common/logging" } +execution_layer = { path = "../execution_layer" } +sensitive_url = { path = "../../common/sensitive_url" } +superstruct = "0.4.0" + +[[test]] +name = "beacon_chain_tests" +path = "tests/main.rs" diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index c672ff6be6..6692aa48cd 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -183,24 +183,6 @@ pub enum Error { /// single-participant attestation from this validator for this epoch and should not observe /// another. PriorAttestationKnown { validator_index: u64, epoch: Epoch }, - /// The attestation is for an epoch in the future (with respect to the gossip clock disparity). - /// - /// ## Peer scoring - /// - /// Assuming the local clock is correct, the peer has sent an invalid message. - FutureEpoch { - attestation_epoch: Epoch, - current_epoch: Epoch, - }, - /// The attestation is for an epoch in the past (with respect to the gossip clock disparity). - /// - /// ## Peer scoring - /// - /// Assuming the local clock is correct, the peer has sent an invalid message. - PastEpoch { - attestation_epoch: Epoch, - current_epoch: Epoch, - }, /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the /// future). 
/// @@ -452,7 +434,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(chain, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation)?; // Check the attestation's epoch matches its target. if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()) @@ -716,7 +698,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(chain, attestation)?; + verify_propagation_slot_range(&chain.slot_clock, attestation)?; // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one // aggregation bit set. @@ -986,11 +968,17 @@ fn verify_head_block_is_known( attestation: &Attestation, max_skip_slots: Option, ) -> Result { - if let Some(block) = chain + let block_opt = chain .fork_choice .read() .get_block(&attestation.data.beacon_block_root) - { + .or_else(|| { + chain + .early_attester_cache + .get_proto_block(attestation.data.beacon_block_root) + }); + + if let Some(block) = block_opt { // Reject any block that exceeds our limit on skipped slots. if let Some(max_skip_slots) = max_skip_slots { if attestation.data.slot > block.slot + max_skip_slots { @@ -1013,14 +1001,13 @@ fn verify_head_block_is_known( /// to the current slot of the `chain`. /// /// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. 
-pub fn verify_propagation_slot_range( - chain: &BeaconChain, - attestation: &Attestation, +pub fn verify_propagation_slot_range( + slot_clock: &S, + attestation: &Attestation, ) -> Result<(), Error> { let attestation_slot = attestation.data.slot; - let latest_permissible_slot = chain - .slot_clock + let latest_permissible_slot = slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; if attestation_slot > latest_permissible_slot { @@ -1031,11 +1018,10 @@ pub fn verify_propagation_slot_range( } // Taking advantage of saturating subtraction on `Slot`. - let earliest_permissible_slot = chain - .slot_clock + let earliest_permissible_slot = slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)? - - T::EthSpec::slots_per_epoch(); + - E::slots_per_epoch(); if attestation_slot < earliest_permissible_slot { return Err(Error::PastSlot { attestation_slot, @@ -1242,7 +1228,9 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. - if !chain.fork_choice.read().contains_block(&target.root) { + if !chain.fork_choice.read().contains_block(&target.root) + && !chain.early_attester_cache.contains_block(target.root) + { return Err(Error::UnknownTargetRoot(target.root)); } diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index 01662efc13..24963a125d 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -75,7 +75,7 @@ impl From for Error { /// Stores the minimal amount of data required to compute the committee length for any committee at any /// slot in a given `epoch`. -struct CommitteeLengths { +pub struct CommitteeLengths { /// The `epoch` to which the lengths pertain. 
epoch: Epoch, /// The length of the shuffling in `self.epoch`. @@ -84,7 +84,7 @@ struct CommitteeLengths { impl CommitteeLengths { /// Instantiate `Self` using `state.current_epoch()`. - fn new(state: &BeaconState, spec: &ChainSpec) -> Result { + pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result { let active_validator_indices_len = if let Ok(committee_cache) = state.committee_cache(RelativeEpoch::Current) { @@ -101,8 +101,16 @@ impl CommitteeLengths { }) } + /// Get the count of committees per each slot of `self.epoch`. + pub fn get_committee_count_per_slot( + &self, + spec: &ChainSpec, + ) -> Result { + T::get_committee_count_per_slot(self.active_validator_indices_len, spec).map_err(Into::into) + } + /// Get the length of the committee at the given `slot` and `committee_index`. - fn get( + pub fn get_committee_length( &self, slot: Slot, committee_index: CommitteeIndex, @@ -120,8 +128,7 @@ impl CommitteeLengths { } let slots_per_epoch = slots_per_epoch as usize; - let committees_per_slot = - T::get_committee_count_per_slot(self.active_validator_indices_len, spec)?; + let committees_per_slot = self.get_committee_count_per_slot::(spec)?; let index_in_epoch = compute_committee_index_in_epoch( slot, slots_per_epoch, @@ -172,7 +179,7 @@ impl AttesterCacheValue { spec: &ChainSpec, ) -> Result<(JustifiedCheckpoint, CommitteeLength), Error> { self.committee_lengths - .get::(slot, committee_index, spec) + .get_committee_length::(slot, committee_index, spec) .map(|committee_length| (self.current_justified_checkpoint, committee_length)) } } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5f8b70bf44..4e1d54dc13 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -12,9 +12,11 @@ use crate::block_verification::{ IntoFullyVerifiedBlock, }; use crate::chain_config::ChainConfig; +use crate::early_attester_cache::EarlyAttesterCache; use 
crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::ServerSentEventHandler; +use crate::execution_payload::get_execution_payload; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; use crate::migrate::BackgroundMigrator; @@ -49,23 +51,26 @@ use crate::{metrics, BeaconChainError}; use eth2::types::{ EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty, }; -use fork_choice::ForkChoice; +use execution_layer::ExecutionLayer; +use fork_choice::{AttestationFromBlock, ForkChoice}; use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; +use proto_array::ExecutionStatus; use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; +use ssz::Encode; use state_processing::{ common::get_indexed_attestation, per_block_processing, - per_block_processing::errors::AttestationValidationError, + per_block_processing::{errors::AttestationValidationError, is_merge_transition_complete}, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, - BlockSignatureStrategy, SigVerifiedOp, + BlockSignatureStrategy, SigVerifiedOp, VerifyBlockRoot, }; use std::borrow::Cow; use std::cmp::Ordering; @@ -103,6 +108,9 @@ pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::zero(); pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::zero(); +/// Defines how old a block can be before it's no longer a candidate for the early attester cache. +const EARLY_ATTESTER_CACHE_HISTORIC_SLOTS: u64 = 4; + /// Defines the behaviour when a block/block-root for a skipped slot is requested. pub enum WhenSlotSkipped { /// If the slot is a skip slot, return `None`. 
@@ -191,6 +199,8 @@ pub struct HeadInfo { pub genesis_time: u64, pub genesis_validators_root: Hash256, pub proposer_shuffling_decision_root: Hash256, + pub is_merge_transition_complete: bool, + pub execution_payload_block_hash: Option, } pub trait BeaconChainTypes: Send + Sync + 'static { @@ -201,6 +211,25 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type EthSpec: types::EthSpec; } +/// Indicates the EL payload verification status of the head beacon block. +#[derive(Debug, PartialEq)] +pub enum HeadSafetyStatus { + /// The head block has either been verified by an EL or is does not require EL verification + /// (e.g., it is pre-merge or pre-terminal-block). + /// + /// If the block is post-terminal-block, `Some(execution_payload.block_hash)` is included with + /// the variant. + Safe(Option), + /// The head block execution payload has not yet been verified by an EL. + /// + /// The `execution_payload.block_hash` of the head block is returned. + Unsafe(Hash256), + /// The head block execution payload was deemed to be invalid by an EL. + /// + /// The `execution_payload.block_hash` of the head block is returned. + Invalid(Hash256), +} + pub type BeaconForkChoice = ForkChoice< BeaconForkChoiceStore< ::EthSpec, @@ -275,6 +304,8 @@ pub struct BeaconChain { Mutex, T::EthSpec>>, /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, + /// Interfaces with the execution client. + pub execution_layer: Option, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. pub(crate) canonical_head: TimeoutRwLock>, /// The root of the genesis block. @@ -301,10 +332,10 @@ pub struct BeaconChain { pub(crate) validator_pubkey_cache: TimeoutRwLock>, /// A cache used when producing attestations. pub(crate) attester_cache: Arc, + /// A cache used when producing attestations whilst the head block is still being imported. 
+ pub early_attester_cache: EarlyAttesterCache, /// A cache used to keep track of various block timings. pub block_times_cache: Arc>, - /// A list of any hard-coded forks that have been disabled. - pub disabled_forks: Vec, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. pub shutdown_sender: Sender, @@ -461,7 +492,7 @@ impl BeaconChain { pub fn forwards_iter_block_roots( &self, start_slot: Slot, - ) -> Result>, Error> { + ) -> Result> + '_, Error> { let oldest_block_slot = self.store.get_oldest_block_slot(); if start_slot < oldest_block_slot { return Err(Error::HistoricalBlockError( @@ -474,8 +505,7 @@ impl BeaconChain { let local_head = self.head()?; - let iter = HotColdDB::forwards_block_roots_iterator( - self.store.clone(), + let iter = self.store.forwards_block_roots_iterator( start_slot, local_head.beacon_state, local_head.beacon_block_root, @@ -485,6 +515,43 @@ impl BeaconChain { Ok(iter.map(|result| result.map_err(Into::into))) } + /// Even more efficient variant of `forwards_iter_block_roots` that will avoid cloning the head + /// state if it isn't required for the requested range of blocks. 
+ pub fn forwards_iter_block_roots_until( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result> + '_, Error> { + let oldest_block_slot = self.store.get_oldest_block_slot(); + if start_slot < oldest_block_slot { + return Err(Error::HistoricalBlockError( + HistoricalBlockError::BlockOutOfRange { + slot: start_slot, + oldest_block_slot, + }, + )); + } + + self.with_head(move |head| { + let iter = self.store.forwards_block_roots_iterator_until( + start_slot, + end_slot, + || { + ( + head.beacon_state.clone_with_only_committee_caches(), + head.beacon_block_root, + ) + }, + &self.spec, + )?; + Ok(iter + .map(|result| result.map_err(Into::into)) + .take_while(move |result| { + result.as_ref().map_or(true, |(_, slot)| *slot <= end_slot) + })) + }) + } + /// Traverse backwards from `block_root` to find the block roots of its ancestors. /// /// ## Notes @@ -497,14 +564,14 @@ impl BeaconChain { pub fn rev_iter_block_roots_from( &self, block_root: Hash256, - ) -> Result>, Error> { + ) -> Result> + '_, Error> { let block = self .get_block(&block_root)? .ok_or(Error::MissingBeaconBlock(block_root))?; let state = self .get_state(&block.state_root(), Some(block.slot()))? .ok_or_else(|| Error::MissingBeaconState(block.state_root()))?; - let iter = BlockRootsIterator::owned(self.store.clone(), state); + let iter = BlockRootsIterator::owned(&self.store, state); Ok(std::iter::once(Ok((block_root, block.slot()))) .chain(iter) .map(|result| result.map_err(|e| e.into()))) @@ -591,12 +658,12 @@ impl BeaconChain { /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot /// returned may be earlier than the wall-clock slot. 
pub fn rev_iter_state_roots_from<'a>( - &self, + &'a self, state_root: Hash256, state: &'a BeaconState, ) -> impl Iterator> + 'a { std::iter::once(Ok((state_root, state.slot()))) - .chain(StateRootsIterator::new(self.store.clone(), state)) + .chain(StateRootsIterator::new(&self.store, state)) .map(|result| result.map_err(Into::into)) } @@ -610,11 +677,10 @@ impl BeaconChain { pub fn forwards_iter_state_roots( &self, start_slot: Slot, - ) -> Result>, Error> { + ) -> Result> + '_, Error> { let local_head = self.head()?; - let iter = HotColdDB::forwards_state_roots_iterator( - self.store.clone(), + let iter = self.store.forwards_state_roots_iterator( start_slot, local_head.beacon_state_root(), local_head.beacon_state, @@ -624,6 +690,36 @@ impl BeaconChain { Ok(iter.map(|result| result.map_err(Into::into))) } + /// Super-efficient forwards state roots iterator that avoids cloning the head if the state + /// roots lie entirely within the freezer database. + /// + /// The iterator returned will include roots for `start_slot..=end_slot`, i.e. it + /// is endpoint inclusive. + pub fn forwards_iter_state_roots_until( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result> + '_, Error> { + self.with_head(move |head| { + let iter = self.store.forwards_state_roots_iterator_until( + start_slot, + end_slot, + || { + ( + head.beacon_state.clone_with_only_committee_caches(), + head.beacon_state_root(), + ) + }, + &self.spec, + )?; + Ok(iter + .map(|result| result.map_err(Into::into)) + .take_while(move |result| { + result.as_ref().map_or(true, |(_, slot)| *slot <= end_slot) + })) + }) + } + /// Returns the block at the given slot, if any. Only returns blocks in the canonical chain. /// /// Use the `skips` parameter to define the behaviour when `request_slot` is a skipped slot. 
@@ -681,18 +777,21 @@ impl BeaconChain { return Ok(Some(root)); } - process_results(self.forwards_iter_state_roots(request_slot)?, |mut iter| { - if let Some((root, slot)) = iter.next() { - if slot == request_slot { - Ok(Some(root)) + process_results( + self.forwards_iter_state_roots_until(request_slot, request_slot)?, + |mut iter| { + if let Some((root, slot)) = iter.next() { + if slot == request_slot { + Ok(Some(root)) + } else { + // Sanity check. + Err(Error::InconsistentForwardsIter { request_slot, slot }) + } } else { - // Sanity check. - Err(Error::InconsistentForwardsIter { request_slot, slot }) + Ok(None) } - } else { - Ok(None) - } - })? + }, + )? } /// Returns the block root at the given slot, if any. Only returns roots in the canonical chain. @@ -763,11 +862,10 @@ impl BeaconChain { return Ok(root_opt); } - if let Some(((prev_root, _), (curr_root, curr_slot))) = - process_results(self.forwards_iter_block_roots(prev_slot)?, |iter| { - iter.tuple_windows().next() - })? - { + if let Some(((prev_root, _), (curr_root, curr_slot))) = process_results( + self.forwards_iter_block_roots_until(prev_slot, request_slot)?, + |iter| iter.tuple_windows().next(), + )? { // Sanity check. if curr_slot != request_slot { return Err(Error::InconsistentForwardsIter { @@ -815,18 +913,43 @@ impl BeaconChain { return Ok(Some(root)); } - process_results(self.forwards_iter_block_roots(request_slot)?, |mut iter| { - if let Some((root, slot)) = iter.next() { - if slot == request_slot { - Ok(Some(root)) + process_results( + self.forwards_iter_block_roots_until(request_slot, request_slot)?, + |mut iter| { + if let Some((root, slot)) = iter.next() { + if slot == request_slot { + Ok(Some(root)) + } else { + // Sanity check. + Err(Error::InconsistentForwardsIter { request_slot, slot }) + } } else { - // Sanity check. - Err(Error::InconsistentForwardsIter { request_slot, slot }) + Ok(None) } - } else { - Ok(None) - } - })? + }, + )? + } + + /// Returns the block at the given root, if any. 
+ /// + /// Will also check the early attester cache for the block. Because of this, there's no + /// guarantee that a block returned from this function has a `BeaconState` available in + /// `self.store`. The expected use for this function is *only* for returning blocks requested + /// from P2P peers. + /// + /// ## Errors + /// + /// May return a database error. + pub fn get_block_checking_early_attester_cache( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + let block_opt = self + .store + .get_block(block_root)? + .or_else(|| self.early_attester_cache.get_block(*block_root)); + + Ok(block_opt) } /// Returns the block at the given root, if any. @@ -996,6 +1119,14 @@ impl BeaconChain { genesis_time: head.beacon_state.genesis_time(), genesis_validators_root: head.beacon_state.genesis_validators_root(), proposer_shuffling_decision_root, + is_merge_transition_complete: is_merge_transition_complete(&head.beacon_state), + execution_payload_block_hash: head + .beacon_block + .message() + .body() + .execution_payload() + .ok() + .map(|ep| ep.block_hash), }) }) } @@ -1078,12 +1209,13 @@ impl BeaconChain { Ok(state) } Ordering::Less => { - let state_root = process_results(self.forwards_iter_state_roots(slot)?, |iter| { - iter.take_while(|(_, current_slot)| *current_slot >= slot) - .find(|(_, current_slot)| *current_slot == slot) - .map(|(root, _slot)| root) - })? - .ok_or(Error::NoStateForSlot(slot))?; + let state_root = + process_results(self.forwards_iter_state_roots_until(slot, slot)?, |iter| { + iter.take_while(|(_, current_slot)| *current_slot >= slot) + .find(|(_, current_slot)| *current_slot == slot) + .map(|(root, _slot)| root) + })? + .ok_or(Error::NoStateForSlot(slot))?; Ok(self .get_state(&state_root, Some(slot))? 
@@ -1222,7 +1354,7 @@ impl BeaconChain { beacon_block_root: Hash256, state: &BeaconState, ) -> Result, Error> { - let iter = BlockRootsIterator::new(self.store.clone(), state); + let iter = BlockRootsIterator::new(&self.store, state); let iter_with_head = std::iter::once(Ok((beacon_block_root, state.slot()))) .chain(iter) .map(|result| result.map_err(|e| e.into())); @@ -1316,6 +1448,29 @@ impl BeaconChain { ) -> Result, Error> { let _total_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_SECONDS); + // The early attester cache will return `Some(attestation)` in the scenario where there is a + // block being imported that will become the head block, but that block has not yet been + // inserted into the database and set as `self.canonical_head`. + // + // In effect, the early attester cache prevents slow database IO from causing missed + // head/target votes. + match self + .early_attester_cache + .try_attest(request_slot, request_index, &self.spec) + { + // The cache matched this request, return the value. + Ok(Some(attestation)) => return Ok(attestation), + // The cache did not match this request, proceed with the rest of this function. + Ok(None) => (), + // The cache returned an error. Log the error and proceed with the rest of this + // function. 
+ Err(e) => warn!( + self.log, + "Early attester cache failed"; + "error" => ?e + ), + } + let slots_per_epoch = T::EthSpec::slots_per_epoch(); let request_epoch = request_slot.epoch(slots_per_epoch); @@ -1568,7 +1723,8 @@ impl BeaconChain { // This method is called for API and gossip attestations, so this covers all unaggregated attestation events if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_attestation_subscribers() { - event_handler.register(EventKind::Attestation(v.attestation().clone())); + event_handler + .register(EventKind::Attestation(Box::new(v.attestation().clone()))); } } metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); @@ -1604,7 +1760,8 @@ impl BeaconChain { // This method is called for API and gossip attestations, so this covers all aggregated attestation events if let Some(event_handler) = self.event_handler.as_ref() { if event_handler.has_attestation_subscribers() { - event_handler.register(EventKind::Attestation(v.attestation().clone())); + event_handler + .register(EventKind::Attestation(Box::new(v.attestation().clone()))); } } metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); @@ -1664,7 +1821,11 @@ impl BeaconChain { self.fork_choice .write() - .on_attestation(self.slot()?, verified.indexed_attestation()) + .on_attestation( + self.slot()?, + verified.indexed_attestation(), + AttestationFromBlock::False, + ) .map_err(Into::into) } @@ -2288,6 +2449,7 @@ impl BeaconChain { let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); let mut ops = fully_verified_block.confirmation_db_batch; + let payload_verification_status = fully_verified_block.payload_verification_status; let attestation_observation_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION); @@ -2406,8 +2568,21 @@ impl BeaconChain { { let _fork_choice_block_timer = 
metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); + let block_delay = self + .slot_clock + .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .ok_or(Error::UnableToComputeTimeAtSlot)?; + fork_choice - .on_block(current_slot, &block, block_root, &state, &self.spec) + .on_block( + current_slot, + &block, + block_root, + block_delay, + &state, + payload_verification_status, + &self.spec, + ) .map_err(|e| BlockError::BeaconChainError(e.into()))?; } @@ -2428,7 +2603,11 @@ impl BeaconChain { let indexed_attestation = get_indexed_attestation(committee.committee, attestation) .map_err(|e| BlockError::BeaconChainError(e.into()))?; - match fork_choice.on_attestation(current_slot, &indexed_attestation) { + match fork_choice.on_attestation( + current_slot, + &indexed_attestation, + AttestationFromBlock::True, + ) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. The // block might be very old and therefore the attestations useless to fork choice. @@ -2472,8 +2651,44 @@ impl BeaconChain { } } + // If the block is recent enough, check to see if it becomes the head block. If so, apply it + // to the early attester cache. This will allow attestations to the block without waiting + // for the block and state to be inserted to the database. + // + // Only performing this check on recent blocks avoids slowing down sync with lots of calls + // to fork choice `get_head`. 
+ if block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot { + let new_head_root = fork_choice + .get_head(current_slot, &self.spec) + .map_err(BeaconChainError::from)?; + + if new_head_root == block_root { + if let Some(proto_block) = fork_choice.get_block(&block_root) { + if let Err(e) = self.early_attester_cache.add_head_block( + block_root, + signed_block.clone(), + proto_block, + &state, + &self.spec, + ) { + warn!( + self.log, + "Early attester cache insert failed"; + "error" => ?e + ); + } + } else { + warn!( + self.log, + "Early attester block missing"; + "block_root" => ?block_root + ); + } + } + } + // Register sync aggregate with validator monitor - if let Some(sync_aggregate) = block.body().sync_aggregate() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { // `SyncCommittee` for the sync_aggregate should correspond to the duty slot let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); let sync_committee = self.sync_committee_at_epoch(duty_epoch)?; @@ -2514,7 +2729,7 @@ impl BeaconChain { block.body().attestations().len() as f64, ); - if let Some(sync_aggregate) = block.body().sync_aggregate() { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { metrics::set_gauge( &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS, sync_aggregate.num_set_bits() as i64, @@ -2579,6 +2794,7 @@ impl BeaconChain { beacon_block_root: block_root, }, None, + &self.spec, ) }) .unwrap_or_else(|e| { @@ -2839,7 +3055,7 @@ impl BeaconChain { })) }; - let inner_block = match state { + let inner_block = match &state { BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase { slot, proposer_index, @@ -2876,6 +3092,28 @@ impl BeaconChain { }, }) } + BeaconState::Merge(_) => { + let sync_aggregate = get_sync_aggregate()?; + let execution_payload = get_execution_payload(self, &state)?; + BeaconBlock::Merge(BeaconBlockMerge { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyMerge { + randao_reveal, + 
eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations, + deposits, + voluntary_exits: voluntary_exits.into(), + sync_aggregate, + execution_payload, + }, + }) + } }; let block = SignedBeaconBlock::from_block( @@ -2884,12 +3122,26 @@ impl BeaconChain { Signature::empty(), ); + let block_size = block.ssz_bytes_len(); + debug!( + self.log, + "Produced block on state"; + "block_size" => block_size, + ); + + metrics::observe(&metrics::BLOCK_SIZE, block_size as f64); + + if block_size > self.config.max_network_size { + return Err(BlockProductionError::BlockTooLarge(block_size)); + } + let process_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_PROCESS_TIMES); per_block_processing( &mut state, &block, None, BlockSignatureStrategy::VerifyRandao, + VerifyBlockRoot::True, &self.spec, )?; drop(process_timer); @@ -2906,7 +3158,7 @@ impl BeaconChain { trace!( self.log, "Produced beacon block"; - "parent" => %block.parent_root(), + "parent" => ?block.parent_root(), "attestations" => block.body().attestations().len(), "slot" => block.slot() ); @@ -2930,7 +3182,10 @@ impl BeaconChain { fn fork_choice_internal(&self) -> Result<(), Error> { // Determine the root of the block that is the head of the chain. 
- let beacon_block_root = self.fork_choice.write().get_head(self.slot()?)?; + let beacon_block_root = self + .fork_choice + .write() + .get_head(self.slot()?, &self.spec)?; let current_head = self.head_info()?; let old_finalized_checkpoint = current_head.finalized_checkpoint; @@ -3009,10 +3264,10 @@ impl BeaconChain { warn!( self.log, "Beacon chain re-org"; - "previous_head" => %current_head.block_root, + "previous_head" => ?current_head.block_root, "previous_slot" => current_head.slot, - "new_head_parent" => %new_head.beacon_block.parent_root(), - "new_head" => %beacon_block_root, + "new_head_parent" => ?new_head.beacon_block.parent_root(), + "new_head" => ?beacon_block_root, "new_slot" => new_head.beacon_block.slot(), "reorg_distance" => reorg_distance, ); @@ -3020,11 +3275,11 @@ impl BeaconChain { debug!( self.log, "Head beacon block"; - "justified_root" => %new_head.beacon_state.current_justified_checkpoint().root, + "justified_root" => ?new_head.beacon_state.current_justified_checkpoint().root, "justified_epoch" => new_head.beacon_state.current_justified_checkpoint().epoch, - "finalized_root" => %new_head.beacon_state.finalized_checkpoint().root, + "finalized_root" => ?new_head.beacon_state.finalized_checkpoint().root, "finalized_epoch" => new_head.beacon_state.finalized_checkpoint().epoch, - "root" => %beacon_block_root, + "root" => ?beacon_block_root, "slot" => new_head.beacon_block.slot(), ); }; @@ -3067,8 +3322,21 @@ impl BeaconChain { .beacon_state .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); + // Used later for the execution engine. + let new_head_execution_block_hash_opt = new_head + .beacon_block + .message() + .body() + .execution_payload() + .ok() + .map(|ep| ep.block_hash); + let is_merge_transition_complete = is_merge_transition_complete(&new_head.beacon_state); + drop(lag_timer); + // Clear the early attester cache in case it conflicts with `self.canonical_head`. 
+ self.early_attester_cache.clear(); + // Update the snapshot that stores the head of the chain at the time it received the // block. *self @@ -3173,6 +3441,33 @@ impl BeaconChain { } if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch { + // Check to ensure that this finalized block hasn't been marked as invalid. + let finalized_block = self + .fork_choice + .read() + .get_block(&new_finalized_checkpoint.root) + .ok_or(BeaconChainError::FinalizedBlockMissingFromForkChoice( + new_finalized_checkpoint.root, + ))?; + if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status { + crit!( + self.log, + "Finalized block has an invalid payload"; + "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ + You may be on a hostile network.", + "block_hash" => ?block_hash + ); + let mut shutdown_sender = self.shutdown_sender(); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Finalized block has an invalid execution payload.", + )) + .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; + + // Exit now, the node is in an invalid state. + return Ok(()); + } + // Due to race conditions, it's technically possible that the head we load here is // different to the one earlier in this function. // @@ -3192,7 +3487,7 @@ impl BeaconChain { .epoch .start_slot(T::EthSpec::slots_per_epoch()); let new_finalized_state_root = process_results( - StateRootsIterator::new(self.store.clone(), &head.beacon_state), + StateRootsIterator::new(&self.store, &head.beacon_state), |mut iter| { iter.find_map(|(state_root, slot)| { if slot == new_finalized_slot { @@ -3270,9 +3565,94 @@ impl BeaconChain { } } + // If this is a post-merge block, update the execution layer. 
+ if let Some(new_head_execution_block_hash) = new_head_execution_block_hash_opt { + if is_merge_transition_complete { + let execution_layer = self + .execution_layer + .clone() + .ok_or(Error::ExecutionLayerMissing)?; + let store = self.store.clone(); + let log = self.log.clone(); + + // Spawn the update task, without waiting for it to complete. + execution_layer.spawn( + move |execution_layer| async move { + if let Err(e) = Self::update_execution_engine_forkchoice( + execution_layer, + store, + new_finalized_checkpoint.root, + new_head_execution_block_hash, + ) + .await + { + debug!( + log, + "Failed to update execution head"; + "error" => ?e + ); + } + }, + "update_execution_engine_forkchoice", + ) + } + } + Ok(()) } + pub async fn update_execution_engine_forkchoice( + execution_layer: ExecutionLayer, + store: BeaconStore, + finalized_beacon_block_root: Hash256, + head_execution_block_hash: Hash256, + ) -> Result<(), Error> { + // Loading the finalized block from the store is not ideal. Perhaps it would be better to + // store it on fork-choice so we can do a lookup without hitting the database. + // + // See: https://github.com/sigp/lighthouse/pull/2627#issuecomment-927537245 + let finalized_block = store + .get_block(&finalized_beacon_block_root)? + .ok_or(Error::MissingBeaconBlock(finalized_beacon_block_root))?; + + let finalized_execution_block_hash = finalized_block + .message() + .body() + .execution_payload() + .ok() + .map(|ep| ep.block_hash) + .unwrap_or_else(Hash256::zero); + + execution_layer + .notify_forkchoice_updated( + head_execution_block_hash, + finalized_execution_block_hash, + None, + ) + .await + .map_err(Error::ExecutionForkChoiceUpdateFailed) + } + + /// Returns the status of the current head block, regarding the validity of the execution + /// payload. 
+ pub fn head_safety_status(&self) -> Result { + let head = self.head_info()?; + let head_block = self + .fork_choice + .read() + .get_block(&head.block_root) + .ok_or(BeaconChainError::HeadMissingFromForkChoice(head.block_root))?; + + let status = match head_block.execution_status { + ExecutionStatus::Valid(block_hash) => HeadSafetyStatus::Safe(Some(block_hash)), + ExecutionStatus::Invalid(block_hash) => HeadSafetyStatus::Invalid(block_hash), + ExecutionStatus::Unknown(block_hash) => HeadSafetyStatus::Unsafe(block_hash), + ExecutionStatus::Irrelevant(_) => HeadSafetyStatus::Safe(None), + }; + + Ok(status) + } + /// This function takes a configured weak subjectivity `Checkpoint` and the latest finalized `Checkpoint`. /// If the weak subjectivity checkpoint and finalized checkpoint share the same epoch, we compare /// roots. If we the weak subjectivity checkpoint is from an older epoch, we iterate back through @@ -3359,6 +3739,12 @@ impl BeaconChain { .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .map(|mut snapshot_cache| { snapshot_cache.prune(new_finalized_checkpoint.epoch); + debug!( + self.log, + "Snapshot cache pruned"; + "new_len" => snapshot_cache.len(), + "remaining_roots" => ?snapshot_cache.beacon_block_roots(), + ); }) .unwrap_or_else(|| { error!( diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 66a99e1f42..1974686dc5 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -1,16 +1,20 @@ //! Defines the `BeaconForkChoiceStore` which provides the persistent storage for the `ForkChoice` //! struct. //! -//! Additionally, the private `BalancesCache` struct is defined; a cache designed to avoid database +//! Additionally, the `BalancesCache` struct is defined; a cache designed to avoid database //! reads when fork choice requires the validator balances of the justified state. 
use crate::{metrics, BeaconSnapshot}; +use derivative::Derivative; use fork_choice::ForkChoiceStore; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; -use types::{BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, Slot}; +use superstruct::superstruct; +use types::{ + BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, Slot, +}; #[derive(Debug)] pub enum Error { @@ -54,24 +58,34 @@ pub fn get_effective_balances(state: &BeaconState) -> Vec { .collect() } -/// An item that is stored in the `BalancesCache`. -#[derive(PartialEq, Clone, Debug, Encode, Decode)] -struct CacheItem { - /// The block root at which `self.balances` are valid. - block_root: Hash256, - /// The effective balances from a `BeaconState` validator registry. - balances: Vec, +#[superstruct( + variants(V1, V8), + variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)), + no_enum +)] +pub(crate) struct CacheItem { + pub(crate) block_root: Hash256, + #[superstruct(only(V8))] + pub(crate) epoch: Epoch, + pub(crate) balances: Vec, } -/// Provides a cache to avoid reading `BeaconState` from disk when updating the current justified -/// checkpoint. -/// -/// It is effectively a mapping of `epoch_boundary_block_root -> state.balances`. -#[derive(PartialEq, Clone, Default, Debug, Encode, Decode)] -struct BalancesCache { - items: Vec, +pub(crate) type CacheItem = CacheItemV8; + +#[superstruct( + variants(V1, V8), + variant_attributes(derive(PartialEq, Clone, Default, Debug, Encode, Decode)), + no_enum +)] +pub struct BalancesCache { + #[superstruct(only(V1))] + pub(crate) items: Vec, + #[superstruct(only(V8))] + pub(crate) items: Vec, } +pub type BalancesCache = BalancesCacheV8; + impl BalancesCache { /// Inspect the given `state` and determine the root of the block at the first slot of /// `state.current_epoch`. 
If there is not already some entry for the given block root, then @@ -81,13 +95,8 @@ impl BalancesCache { block_root: Hash256, state: &BeaconState, ) -> Result<(), Error> { - // We are only interested in balances from states that are at the start of an epoch, - // because this is where the `current_justified_checkpoint.root` will point. - if !Self::is_first_block_in_epoch(block_root, state)? { - return Ok(()); - } - - let epoch_boundary_slot = state.current_epoch().start_slot(E::slots_per_epoch()); + let epoch = state.current_epoch(); + let epoch_boundary_slot = epoch.start_slot(E::slots_per_epoch()); let epoch_boundary_root = if epoch_boundary_slot == state.slot() { block_root } else { @@ -96,9 +105,14 @@ impl BalancesCache { *state.get_block_root(epoch_boundary_slot)? }; - if self.position(epoch_boundary_root).is_none() { + // Check if there already exists a cache entry for the epoch boundary block of the current + // epoch. We rely on the invariant that effective balances do not change for the duration + // of a single epoch, so even if the block on the epoch boundary itself is skipped we can + // still update its cache entry from any subsequent state in that epoch. + if self.position(epoch_boundary_root, epoch).is_none() { let item = CacheItem { block_root: epoch_boundary_root, + epoch, balances: get_effective_balances(state), }; @@ -112,50 +126,27 @@ impl BalancesCache { Ok(()) } - /// Returns `true` if the given `block_root` is the first/only block to have been processed in - /// the epoch of the given `state`. - /// - /// We can determine if it is the first block by looking back through `state.block_roots` to - /// see if there is a block in the current epoch with a different root. - fn is_first_block_in_epoch( - block_root: Hash256, - state: &BeaconState, - ) -> Result { - let mut prior_block_found = false; - - for slot in state.current_epoch().slot_iter(E::slots_per_epoch()) { - if slot < state.slot() { - if *state.get_block_root(slot)? 
!= block_root { - prior_block_found = true; - break; - } - } else { - break; - } - } - - Ok(!prior_block_found) - } - - fn position(&self, block_root: Hash256) -> Option { + fn position(&self, block_root: Hash256, epoch: Epoch) -> Option { self.items .iter() - .position(|item| item.block_root == block_root) + .position(|item| item.block_root == block_root && item.epoch == epoch) } /// Get the balances for the given `block_root`, if any. /// - /// If some balances are found, they are removed from the cache. - pub fn get(&mut self, block_root: Hash256) -> Option> { - let i = self.position(block_root)?; - Some(self.items.remove(i).balances) + /// If some balances are found, they are cloned from the cache. + pub fn get(&mut self, block_root: Hash256, epoch: Epoch) -> Option> { + let i = self.position(block_root, epoch)?; + Some(self.items[i].balances.clone()) } } /// Implements `fork_choice::ForkChoiceStore` in order to provide a persistent backing to the /// `fork_choice::ForkChoice` struct. -#[derive(Debug)] +#[derive(Debug, Derivative)] +#[derivative(PartialEq(bound = "E: EthSpec, Hot: ItemStore, Cold: ItemStore"))] pub struct BeaconForkChoiceStore, Cold: ItemStore> { + #[derivative(PartialEq = "ignore")] store: Arc>, balances_cache: BalancesCache, time: Slot, @@ -163,26 +154,10 @@ pub struct BeaconForkChoiceStore, Cold: ItemStore< justified_checkpoint: Checkpoint, justified_balances: Vec, best_justified_checkpoint: Checkpoint, + proposer_boost_root: Hash256, _phantom: PhantomData, } -impl PartialEq for BeaconForkChoiceStore -where - E: EthSpec, - Hot: ItemStore, - Cold: ItemStore, -{ - /// This implementation ignores the `store` and `slot_clock`. 
- fn eq(&self, other: &Self) -> bool { - self.balances_cache == other.balances_cache - && self.time == other.time - && self.finalized_checkpoint == other.finalized_checkpoint - && self.justified_checkpoint == other.justified_checkpoint - && self.justified_balances == other.justified_balances - && self.best_justified_checkpoint == other.best_justified_checkpoint - } -} - impl BeaconForkChoiceStore where E: EthSpec, @@ -226,6 +201,7 @@ where justified_balances: anchor_state.balances().to_vec(), finalized_checkpoint, best_justified_checkpoint: justified_checkpoint, + proposer_boost_root: Hash256::zero(), _phantom: PhantomData, } } @@ -240,6 +216,7 @@ where justified_checkpoint: self.justified_checkpoint, justified_balances: self.justified_balances.clone(), best_justified_checkpoint: self.best_justified_checkpoint, + proposer_boost_root: self.proposer_boost_root, } } @@ -256,6 +233,7 @@ where justified_checkpoint: persisted.justified_checkpoint, justified_balances: persisted.justified_balances, best_justified_checkpoint: persisted.best_justified_checkpoint, + proposer_boost_root: persisted.proposer_boost_root, _phantom: PhantomData, }) } @@ -302,6 +280,10 @@ where &self.finalized_checkpoint } + fn proposer_boost_root(&self) -> Hash256 { + self.proposer_boost_root + } + fn set_finalized_checkpoint(&mut self, checkpoint: Checkpoint) { self.finalized_checkpoint = checkpoint } @@ -309,7 +291,10 @@ where fn set_justified_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<(), Error> { self.justified_checkpoint = checkpoint; - if let Some(balances) = self.balances_cache.get(self.justified_checkpoint.root) { + if let Some(balances) = self.balances_cache.get( + self.justified_checkpoint.root, + self.justified_checkpoint.epoch, + ) { metrics::inc_counter(&metrics::BALANCES_CACHE_HITS); self.justified_balances = balances; } else { @@ -322,13 +307,13 @@ where .deconstruct() .0; - self.justified_balances = self + let state = self .store 
.get_state(&justified_block.state_root(), Some(justified_block.slot())) .map_err(Error::FailedToReadState)? - .ok_or_else(|| Error::MissingState(justified_block.state_root()))? - .balances() - .to_vec(); + .ok_or_else(|| Error::MissingState(justified_block.state_root()))?; + + self.justified_balances = get_effective_balances(&state); } Ok(()) @@ -337,15 +322,30 @@ where fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint) { self.best_justified_checkpoint = checkpoint } + + fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256) { + self.proposer_boost_root = proposer_boost_root; + } } /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. -#[derive(Encode, Decode)] +#[superstruct( + variants(V1, V7, V8), + variant_attributes(derive(Encode, Decode)), + no_enum +)] pub struct PersistedForkChoiceStore { - balances_cache: BalancesCache, - time: Slot, - finalized_checkpoint: Checkpoint, - justified_checkpoint: Checkpoint, - justified_balances: Vec, - best_justified_checkpoint: Checkpoint, + #[superstruct(only(V1, V7))] + pub balances_cache: BalancesCacheV1, + #[superstruct(only(V8))] + pub balances_cache: BalancesCacheV8, + pub time: Slot, + pub finalized_checkpoint: Checkpoint, + pub justified_checkpoint: Checkpoint, + pub justified_balances: Vec, + pub best_justified_checkpoint: Checkpoint, + #[superstruct(only(V7, V8))] + pub proposer_boost_root: Hash256, } + +pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV8; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 5ddeafa459..c6d937c81e 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -40,6 +40,9 @@ //! END //! //! 
``` +use crate::execution_payload::{ + execute_payload, validate_execution_payload_for_gossip, validate_merge_block, +}; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -50,21 +53,24 @@ use crate::{ }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; -use fork_choice::{ForkChoice, ForkChoiceStore}; +use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; +use safe_arith::ArithError; use slog::{debug, error, Logger}; use slot_clock::SlotClock; use ssz::Encode; +use state_processing::per_block_processing::is_merge_transition_block; use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, per_block_processing, per_slot_processing, state_advance::partial_state_advance, - BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, + BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs; use std::io::Write; +use std::time::Duration; use store::{Error as DBError, HotColdDB, HotStateSummary, KeyValueStore, StoreOp}; use tree_hash::TreeHash; use types::{ @@ -223,6 +229,105 @@ pub enum BlockError { /// /// The block is invalid and the peer is faulty. InconsistentFork(InconsistentFork), + /// There was an error while validating the ExecutionPayload + /// + /// ## Peer scoring + /// + /// See `ExecutionPayloadError` for scoring information + ExecutionPayloadError(ExecutionPayloadError), + /// The block references a parent block which has an execution payload which was found to be + /// invalid. + /// + /// ## Peer scoring + /// + /// TODO(merge): reconsider how we score peers for this. 
+ /// + /// The peer sent us an invalid block, but I'm not really sure how to score this in an + /// "optimistic" sync world. + ParentExecutionPayloadInvalid { parent_root: Hash256 }, +} + +/// Returned when block validation failed due to some issue verifying +/// the execution payload. +#[derive(Debug)] +pub enum ExecutionPayloadError { + /// There's no eth1 connection (mandatory after merge) + /// + /// ## Peer scoring + /// + /// As this is our fault, do not penalize the peer + NoExecutionConnection, + /// Error occurred during engine_executePayload + /// + /// ## Peer scoring + /// + /// Some issue with our configuration, do not penalize peer + RequestFailed(execution_layer::Error), + /// The execution engine returned INVALID for the payload + /// + /// ## Peer scoring + /// + /// The block is invalid and the peer is faulty + RejectedByExecutionEngine, + /// The execution payload timestamp does not match the slot + /// + /// ## Peer scoring + /// + /// The block is invalid and the peer is faulty + InvalidPayloadTimestamp { expected: u64, found: u64 }, + /// The execution payload references an execution block that cannot trigger the merge. + /// + /// ## Peer scoring + /// + /// The block is invalid and the peer sent us a block that passes gossip propagation conditions, + /// but is invalid upon further verification. + InvalidTerminalPoWBlock { parent_hash: Hash256 }, + /// The `TERMINAL_BLOCK_HASH` is set, but the block has not reached the + /// `TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH`. + /// + /// ## Peer scoring + /// + /// The block is invalid and the peer sent us a block that passes gossip propagation conditions, + /// but is invalid upon further verification. + InvalidActivationEpoch { + activation_epoch: Epoch, + epoch: Epoch, + }, + /// The `TERMINAL_BLOCK_HASH` is set, but does not match the value specified by the block. 
+ /// + /// ## Peer scoring + /// + /// The block is invalid and the peer sent us a block that passes gossip propagation conditions, + /// but is invalid upon further verification. + InvalidTerminalBlockHash { + terminal_block_hash: Hash256, + payload_parent_hash: Hash256, + }, + /// The execution node failed to provide a parent block to a known block. This indicates an + /// issue with the execution node. + /// + /// ## Peer scoring + /// + /// The peer is not necessarily invalid. + PoWParentMissing(Hash256), +} + +impl From for ExecutionPayloadError { + fn from(e: execution_layer::Error) -> Self { + ExecutionPayloadError::RequestFailed(e) + } +} + +impl From for BlockError { + fn from(e: ExecutionPayloadError) -> Self { + BlockError::ExecutionPayloadError(e) + } +} + +impl From for BlockError { + fn from(e: InconsistentFork) -> Self { + BlockError::InconsistentFork(e) + } } impl std::fmt::Display for BlockError { @@ -277,6 +382,12 @@ impl From for BlockError { } } +impl From for BlockError { + fn from(e: ArithError) -> Self { + BlockError::BeaconChainError(BeaconChainError::ArithError(e)) + } +} + /// Information about invalid blocks which might still be slashable despite being invalid. #[allow(clippy::enum_variant_names)] pub enum BlockSlashInfo { @@ -428,6 +539,7 @@ pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> { pub state: BeaconState, pub parent_block: SignedBeaconBlock, pub confirmation_db_batch: Vec>, + pub payload_verification_status: PayloadVerificationStatus, } /// Implemented on types that can be converted into a `FullyVerifiedBlock`. @@ -668,6 +780,9 @@ impl GossipVerifiedBlock { }); } + // Validate the block's execution_payload (if any). + validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?; + Ok(Self { block, block_root, @@ -989,6 +1104,28 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { } } + // If this block triggers the merge, check to ensure that it references valid execution + // blocks. 
+ // + // The specification defines this check inside `on_block` in the fork-choice specification, + // however we perform the check here for two reasons: + // + // - There's no point in importing a block that will fail fork choice, so it's best to fail + // early. + // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no + // calls to remote servers. + if is_merge_transition_block(&state, block.message().body()) { + validate_merge_block(chain, block.message())? + } + + // The specification declares that this should be run *inside* `per_block_processing`, + // however we run it here to keep `per_block_processing` pure (i.e., no calls to external + // servers). + // + // It is important that this function is called *after* `per_slot_processing`, since the + // `randao` may change. + let payload_verification_status = execute_payload(chain, &state, block.message())?; + // If the block is sufficiently recent, notify the validator monitor. if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); @@ -1048,6 +1185,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { Some(block_root), // Signatures were verified earlier in this function. BlockSignatureStrategy::NoVerification, + VerifyBlockRoot::True, &chain.spec, ) { match err { @@ -1093,6 +1231,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { state, parent_block: parent.beacon_block, confirmation_db_batch, + payload_verification_status, }) } } @@ -1281,6 +1420,8 @@ fn load_parent( ), BlockError, > { + let spec = &chain.spec; + // Reject any block if its parent is not known to fork choice. 
// // A block that is not in fork choice is either: @@ -1299,15 +1440,43 @@ fn load_parent( return Err(BlockError::ParentUnknown(Box::new(block))); } + let block_delay = chain + .block_times_cache + .read() + .get_block_delays( + block.canonical_root(), + chain + .slot_clock + .start_of(block.slot()) + .unwrap_or_else(|| Duration::from_secs(0)), + ) + .observed; + let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); - let result = if let Some(snapshot) = chain + let result = if let Some((snapshot, cloned)) = chain .snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .and_then(|mut snapshot_cache| { - snapshot_cache.get_state_for_block_processing(block.parent_root()) + snapshot_cache.get_state_for_block_processing( + block.parent_root(), + block.slot(), + block_delay, + spec, + ) }) { - Ok((snapshot.into_pre_state(), block)) + if cloned { + metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_CLONES); + debug!( + chain.log, + "Cloned snapshot for late block/skipped slot"; + "slot" => %block.slot(), + "parent_slot" => %snapshot.beacon_block.slot(), + "parent_root" => ?block.parent_root(), + "block_delay" => ?block_delay, + ); + } + Ok((snapshot, block)) } else { // Load the blocks parent block from the database, returning invalid if that block is not // found. 
@@ -1337,6 +1506,16 @@ fn load_parent( BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root)) })?; + metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES); + debug!( + chain.log, + "Missed snapshot cache"; + "slot" => block.slot(), + "parent_slot" => parent_block.slot(), + "parent_root" => ?block.parent_root(), + "block_delay" => ?block_delay, + ); + Ok(( PreProcessingSnapshot { beacon_block: parent_block, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index d96ca70829..24a9a916bb 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -15,6 +15,7 @@ use crate::{ Eth1ChainBackend, ServerSentEventHandler, }; use eth1::Config as Eth1Config; +use execution_layer::ExecutionLayer; use fork_choice::ForkChoice; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, PersistedOperationPool}; @@ -75,6 +76,7 @@ pub struct BeaconChainBuilder { >, op_pool: Option>, eth1_chain: Option>, + execution_layer: Option, event_handler: Option>, slot_clock: Option, shutdown_sender: Option>, @@ -82,7 +84,6 @@ pub struct BeaconChainBuilder { validator_pubkey_cache: Option>, spec: ChainSpec, chain_config: ChainConfig, - disabled_forks: Vec, log: Option, graffiti: Graffiti, slasher: Option>>, @@ -115,11 +116,11 @@ where fork_choice: None, op_pool: None, eth1_chain: None, + execution_layer: None, event_handler: None, slot_clock: None, shutdown_sender: None, head_tracker: None, - disabled_forks: Vec::new(), validator_pubkey_cache: None, spec: TEthSpec::default_spec(), chain_config: ChainConfig::default(), @@ -181,13 +182,6 @@ where self.log = Some(log); self } - - /// Sets a list of hard-coded forks that will not be activated. - pub fn disabled_forks(mut self, disabled_forks: Vec) -> Self { - self.disabled_forks = disabled_forks; - self - } - /// Attempt to load an existing eth1 cache from the builder's `Store`. 
pub fn get_persisted_eth1_backend(&self) -> Result, String> { let store = self @@ -476,6 +470,12 @@ where self } + /// Sets the `BeaconChain` execution layer. + pub fn execution_layer(mut self, execution_layer: Option) -> Self { + self.execution_layer = execution_layer; + self + } + /// Sets the `BeaconChain` event handler backend. /// /// For example, provide `ServerSentEventHandler` as a `handler`. @@ -582,7 +582,7 @@ where }; let initial_head_block_root = fork_choice - .get_head(current_slot) + .get_head(current_slot, &self.spec) .map_err(|e| format!("Unable to get fork choice head: {:?}", e))?; // Try to decode the head block according to the current fork, if that fails, try @@ -737,6 +737,7 @@ where observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), eth1_chain: self.eth1_chain, + execution_layer: self.execution_layer, genesis_validators_root: canonical_head.beacon_state.genesis_validators_root(), canonical_head: TimeoutRwLock::new(canonical_head.clone()), genesis_block_root, @@ -753,7 +754,7 @@ where block_times_cache: <_>::default(), validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), attester_cache: <_>::default(), - disabled_forks: self.disabled_forks, + early_attester_cache: <_>::default(), shutdown_sender: self .shutdown_sender .ok_or("Cannot build without a shutdown sender.")?, @@ -909,7 +910,9 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { mod test { use super::*; use eth2_hashing::hash; - use genesis::{generate_deterministic_keypairs, interop_genesis_state}; + use genesis::{ + generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, + }; use sloggers::{null::NullLoggerBuilder, Build}; use ssz::Encode; use std::time::Duration; @@ -941,6 +944,8 @@ mod test { let genesis_state = interop_genesis_state( &generate_deterministic_keypairs(validator_count), genesis_time, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, &spec, ) .expect("should create 
interop genesis state"); @@ -1006,8 +1011,14 @@ mod test { let keypairs = generate_deterministic_keypairs(validator_count); - let state = interop_genesis_state::(&keypairs, genesis_time, spec) - .expect("should build state"); + let state = interop_genesis_state::( + &keypairs, + genesis_time, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + spec, + ) + .expect("should build state"); assert_eq!( state.eth1_data().block_hash, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 9fe09c9822..4aee06d468 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -16,6 +16,8 @@ pub struct ChainConfig { pub reconstruct_historic_states: bool, /// Whether timeouts on `TimeoutRwLock`s are enabled or not. pub enable_lock_timeouts: bool, + /// The max size of a message that can be sent over the network. + pub max_network_size: usize, } impl Default for ChainConfig { @@ -25,6 +27,7 @@ impl Default for ChainConfig { weak_subjectivity_checkpoint: None, reconstruct_historic_states: false, enable_lock_timeouts: true, + max_network_size: 10 * 1_048_576, // 10M } } } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs new file mode 100644 index 0000000000..56dced94e6 --- /dev/null +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -0,0 +1,161 @@ +use crate::{ + attester_cache::{CommitteeLengths, Error}, + metrics, +}; +use parking_lot::RwLock; +use proto_array::Block as ProtoBlock; +use types::*; + +pub struct CacheItem { + /* + * Values used to create attestations. + */ + epoch: Epoch, + committee_lengths: CommitteeLengths, + beacon_block_root: Hash256, + source: Checkpoint, + target: Checkpoint, + /* + * Values used to make the block available. 
+ */ + block: SignedBeaconBlock, + proto_block: ProtoBlock, +} + +/// Provides a single-item cache which allows for attesting to blocks before those blocks have +/// reached the database. +/// +/// This cache stores enough information to allow Lighthouse to: +/// +/// - Produce an attestation without using `chain.canonical_head`. +/// - Verify that a block root exists (i.e., will be imported in the future) during attestation +/// verification. +/// - Provide a block which can be sent to peers via RPC. +#[derive(Default)] +pub struct EarlyAttesterCache { + item: RwLock>>, +} + +impl EarlyAttesterCache { + /// Removes the cached item, meaning that all future calls to `Self::try_attest` will return + /// `None` until a new cache item is added. + pub fn clear(&self) { + *self.item.write() = None + } + + /// Updates the cache item, so that `Self::try_attest` will return `Some` when given suitable + /// parameters. + pub fn add_head_block( + &self, + beacon_block_root: Hash256, + block: SignedBeaconBlock, + proto_block: ProtoBlock, + state: &BeaconState, + spec: &ChainSpec, + ) -> Result<(), Error> { + let epoch = state.current_epoch(); + let committee_lengths = CommitteeLengths::new(state, spec)?; + let source = state.current_justified_checkpoint(); + let target_slot = epoch.start_slot(E::slots_per_epoch()); + let target = Checkpoint { + epoch, + root: if state.slot() <= target_slot { + beacon_block_root + } else { + *state.get_block_root(target_slot)? + }, + }; + + let item = CacheItem { + epoch, + committee_lengths, + beacon_block_root, + source, + target, + block, + proto_block, + }; + + *self.item.write() = Some(item); + + Ok(()) + } + + /// Will return `Some(attestation)` if all the following conditions are met: + /// + /// - There is a cache `item` present. + /// - If `request_slot` is in the same epoch as `item.epoch`. + /// - If `request_index` does not exceed `item.committee_count`. 
+ pub fn try_attest( + &self, + request_slot: Slot, + request_index: CommitteeIndex, + spec: &ChainSpec, + ) -> Result>, Error> { + let lock = self.item.read(); + let item = if let Some(item) = lock.as_ref() { + item + } else { + return Ok(None); + }; + + let request_epoch = request_slot.epoch(E::slots_per_epoch()); + if request_epoch != item.epoch { + return Ok(None); + } + + let committee_count = item + .committee_lengths + .get_committee_count_per_slot::(spec)?; + if request_index >= committee_count as u64 { + return Ok(None); + } + + let committee_len = + item.committee_lengths + .get_committee_length::(request_slot, request_index, spec)?; + + let attestation = Attestation { + aggregation_bits: BitList::with_capacity(committee_len) + .map_err(BeaconStateError::from)?, + data: AttestationData { + slot: request_slot, + index: request_index, + beacon_block_root: item.beacon_block_root, + source: item.source, + target: item.target, + }, + signature: AggregateSignature::empty(), + }; + + metrics::inc_counter(&metrics::BEACON_EARLY_ATTESTER_CACHE_HITS); + + Ok(Some(attestation)) + } + + /// Returns `true` if `block_root` matches the cached item. + pub fn contains_block(&self, block_root: Hash256) -> bool { + self.item + .read() + .as_ref() + .map_or(false, |item| item.beacon_block_root == block_root) + } + + /// Returns the block, if `block_root` matches the cached item. + pub fn get_block(&self, block_root: Hash256) -> Option> { + self.item + .read() + .as_ref() + .filter(|item| item.beacon_block_root == block_root) + .map(|item| item.block.clone()) + } + + /// Returns the proto-array block, if `block_root` matches the cached item. 
+ pub fn get_proto_block(&self, block_root: Hash256) -> Option { + self.item + .read() + .as_ref() + .filter(|item| item.beacon_block_root == block_root) + .map(|item| item.proto_block.clone()) + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 65b07d87f1..70e288ec26 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -20,7 +20,7 @@ use state_processing::{ }, signature_sets::Error as SignatureSetError, state_advance::Error as StateAdvanceError, - BlockProcessingError, SlotProcessingError, + BlockProcessingError, BlockReplayError, SlotProcessingError, }; use std::time::Duration; use task_executor::ShutdownReason; @@ -40,6 +40,7 @@ macro_rules! easy_from_to { pub enum BeaconChainError { InsufficientValidators, UnableToReadSlot, + UnableToComputeTimeAtSlot, RevertedFinalizedEpoch { previous_epoch: Epoch, new_epoch: Epoch, @@ -85,6 +86,7 @@ pub enum BeaconChainError { ValidatorPubkeyCacheIncomplete(usize), SignatureSetError(SignatureSetError), BlockSignatureVerifierError(state_processing::block_signature_verifier::Error), + BlockReplayError(BlockReplayError), DuplicateValidatorPublicKey, ValidatorPubkeyCacheFileError(String), ValidatorIndexUnknown(usize), @@ -133,6 +135,11 @@ pub enum BeaconChainError { new_slot: Slot, }, AltairForkDisabled, + ExecutionLayerMissing, + ExecutionForkChoiceUpdateFailed(execution_layer::Error), + HeadMissingFromForkChoice(Hash256), + FinalizedBlockMissingFromForkChoice(Hash256), + InvalidFinalizedPayloadShutdownError(TrySendError), } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -154,6 +161,7 @@ easy_from_to!(ArithError, BeaconChainError); easy_from_to!(ForkChoiceStoreError, BeaconChainError); easy_from_to!(HistoricalBlockError, BeaconChainError); easy_from_to!(StateAdvanceError, BeaconChainError); +easy_from_to!(BlockReplayError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { @@ -174,6 +182,13 @@ pub enum 
BlockProductionError { produce_at_slot: Slot, state_slot: Slot, }, + ExecutionLayerMissing, + BlockingFailed(execution_layer::Error), + TerminalPoWBlockLookupFailed(execution_layer::Error), + GetPayloadFailed(execution_layer::Error), + FailedToReadFinalizedBlock(store::Error), + MissingFinalizedBlock(Hash256), + BlockTooLarge(usize), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index aa6978b79f..8dd101b726 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -221,6 +221,11 @@ where } } + /// Returns `true` if the "dummy" backend is being used. + pub fn is_dummy_backend(&self) -> bool { + self.use_dummy_backend + } + /// Returns the `Eth1Data` that should be included in a block being produced for the given /// `state`. pub fn eth1_data_for_block_production( diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs new file mode 100644 index 0000000000..c19bba6126 --- /dev/null +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -0,0 +1,308 @@ +//! This module contains various functions for producing and verifying `ExecutionPayloads`. +//! +//! Lighthouse tends to do payload tasks in *slightly* different locations to the specification. +//! This is because some tasks involve calling out to external servers and it's nice to keep those +//! away from our pure `state_processing` and `fork_choice` crates. +//! +//! So, this module contains functions that one might expect to find in other crates, but they live +//! here for good reason. 
+ +use crate::{ + BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, + ExecutionPayloadError, +}; +use execution_layer::ExecutePayloadResponseStatus; +use fork_choice::PayloadVerificationStatus; +use proto_array::{Block as ProtoBlock, ExecutionStatus}; +use slog::debug; +use slot_clock::SlotClock; +use state_processing::per_block_processing::{ + compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, + partially_verify_execution_payload, +}; +use types::*; + +/// Verify that `execution_payload` contained by `block` is considered valid by an execution +/// engine. +/// +/// ## Specification +/// +/// Equivalent to the `execute_payload` function in the merge Beacon Chain Changes, although it +/// contains a few extra checks by running `partially_verify_execution_payload` first: +/// +/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#execute_payload +pub fn execute_payload( + chain: &BeaconChain, + state: &BeaconState, + block: BeaconBlockRef, +) -> Result> { + if !is_execution_enabled(state, block.body()) { + return Ok(PayloadVerificationStatus::Irrelevant); + } + + let execution_payload = block.execution_payload()?; + + // Perform the initial stages of payload verification. + // + // We will duplicate these checks again during `per_block_processing`, however these checks + // are cheap and doing them here ensures we protect the execution payload from junk. 
+ partially_verify_execution_payload(state, execution_payload, &chain.spec) + .map_err(BlockError::PerBlockProcessingError)?; + + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(ExecutionPayloadError::NoExecutionConnection)?; + let execute_payload_response = execution_layer + .block_on(|execution_layer| execution_layer.execute_payload(execution_payload)); + + match execute_payload_response { + Ok((status, _latest_valid_hash)) => match status { + ExecutePayloadResponseStatus::Valid => Ok(PayloadVerificationStatus::Verified), + // TODO(merge): invalidate any invalid ancestors of this block in fork choice. + ExecutePayloadResponseStatus::Invalid => { + Err(ExecutionPayloadError::RejectedByExecutionEngine.into()) + } + ExecutePayloadResponseStatus::Syncing => Ok(PayloadVerificationStatus::NotVerified), + }, + Err(_) => Err(ExecutionPayloadError::RejectedByExecutionEngine.into()), + } +} + +/// Verify that the block which triggers the merge is valid to be imported to fork choice. +/// +/// ## Errors +/// +/// Will return an error when using a pre-merge fork `state`. Ensure to only run this function +/// after the merge fork. 
+/// +/// ## Specification +/// +/// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes: +/// +/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block +pub fn validate_merge_block( + chain: &BeaconChain, + block: BeaconBlockRef, +) -> Result<(), BlockError> { + let spec = &chain.spec; + let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); + let execution_payload = block.execution_payload()?; + + if spec.terminal_block_hash != Hash256::zero() { + if block_epoch < spec.terminal_block_hash_activation_epoch { + return Err(ExecutionPayloadError::InvalidActivationEpoch { + activation_epoch: spec.terminal_block_hash_activation_epoch, + epoch: block_epoch, + } + .into()); + } + + if execution_payload.parent_hash != spec.terminal_block_hash { + return Err(ExecutionPayloadError::InvalidTerminalBlockHash { + terminal_block_hash: spec.terminal_block_hash, + payload_parent_hash: execution_payload.parent_hash, + } + .into()); + } + + return Ok(()); + } + + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(ExecutionPayloadError::NoExecutionConnection)?; + + let is_valid_terminal_pow_block = execution_layer + .block_on(|execution_layer| { + execution_layer.is_valid_terminal_pow_block_hash(execution_payload.parent_hash, spec) + }) + .map_err(ExecutionPayloadError::from)?; + + match is_valid_terminal_pow_block { + Some(true) => Ok(()), + Some(false) => Err(ExecutionPayloadError::InvalidTerminalPoWBlock { + parent_hash: execution_payload.parent_hash, + } + .into()), + None => { + debug!( + chain.log, + "Optimistically accepting terminal block"; + "block_hash" => ?execution_payload.parent_hash, + "msg" => "the terminal block/parent was unavailable" + ); + Ok(()) + } + } +} + +/// Validate the gossip block's execution_payload according to the checks described here: +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block +pub 
fn validate_execution_payload_for_gossip( + parent_block: &ProtoBlock, + block: BeaconBlockRef<'_, T::EthSpec>, + chain: &BeaconChain, +) -> Result<(), BlockError> { + // Only apply this validation if this is a merge beacon block. + if let Ok(execution_payload) = block.body().execution_payload() { + // This logic should match `is_execution_enabled`. We use only the execution block hash of + // the parent here in order to avoid loading the parent state during gossip verification. + + let is_merge_transition_complete = match parent_block.execution_status { + // Optimistically declare that an "unknown" status block has completed the merge. + ExecutionStatus::Valid(_) | ExecutionStatus::Unknown(_) => true, + // It's impossible for an irrelevant block to have completed the merge. It is pre-merge + // by definition. + ExecutionStatus::Irrelevant(_) => false, + // If the parent has an invalid payload then it's impossible to build a valid block upon + // it. Reject the block. + ExecutionStatus::Invalid(_) => { + return Err(BlockError::ParentExecutionPayloadInvalid { + parent_root: parent_block.root, + }) + } + }; + + if is_merge_transition_complete || execution_payload != &<_>::default() { + let expected_timestamp = chain + .slot_clock + .start_of(block.slot()) + .map(|d| d.as_secs()) + .ok_or(BlockError::BeaconChainError( + BeaconChainError::UnableToComputeTimeAtSlot, + ))?; + + // The block's execution payload timestamp is correct with respect to the slot + if execution_payload.timestamp != expected_timestamp { + return Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::InvalidPayloadTimestamp { + expected: expected_timestamp, + found: execution_payload.timestamp, + }, + )); + } + } + } + + Ok(()) +} + +/// Gets an execution payload for inclusion in a block. +/// +/// ## Errors +/// +/// Will return an error when using a pre-merge fork `state`. Ensure to only run this function +/// after the merge fork. 
+/// +/// ## Specification +/// +/// Equivalent to the `get_execution_payload` function in the Validator Guide: +/// +/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal +pub fn get_execution_payload( + chain: &BeaconChain, + state: &BeaconState, +) -> Result, BlockProductionError> { + Ok(prepare_execution_payload_blocking(chain, state)?.unwrap_or_default()) +} + +/// Wraps the async `prepare_execution_payload` function as a blocking task. +pub fn prepare_execution_payload_blocking( + chain: &BeaconChain, + state: &BeaconState, +) -> Result>, BlockProductionError> { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BlockProductionError::ExecutionLayerMissing)?; + + execution_layer + .block_on_generic(|_| async { prepare_execution_payload(chain, state).await }) + .map_err(BlockProductionError::BlockingFailed)? +} + +/// Prepares an execution payload for inclusion in a block. +/// +/// Will return `Ok(None)` if the merge fork has occurred, but a terminal block has not been found. +/// +/// ## Errors +/// +/// Will return an error when using a pre-merge fork `state`. Ensure to only run this function +/// after the merge fork. 
+/// +/// ## Specification +/// +/// Equivalent to the `prepare_execution_payload` function in the Validator Guide: +/// +/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal +pub async fn prepare_execution_payload( + chain: &BeaconChain, + state: &BeaconState, +) -> Result>, BlockProductionError> { + let spec = &chain.spec; + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BlockProductionError::ExecutionLayerMissing)?; + + let parent_hash = if !is_merge_transition_complete(state) { + let is_terminal_block_hash_set = spec.terminal_block_hash != Hash256::zero(); + let is_activation_epoch_reached = + state.current_epoch() >= spec.terminal_block_hash_activation_epoch; + + if is_terminal_block_hash_set && !is_activation_epoch_reached { + return Ok(None); + } + + let terminal_pow_block_hash = execution_layer + .get_terminal_pow_block_hash(spec) + .await + .map_err(BlockProductionError::TerminalPoWBlockLookupFailed)?; + + if let Some(terminal_pow_block_hash) = terminal_pow_block_hash { + terminal_pow_block_hash + } else { + return Ok(None); + } + } else { + state.latest_execution_payload_header()?.block_hash + }; + + let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; + let random = *state.get_randao_mix(state.current_epoch())?; + let finalized_root = state.finalized_checkpoint().root; + + // The finalized block hash is not included in the specification, however we provide this + // parameter so that the execution layer can produce a payload id if one is not already known + // (e.g., due to a recent reorg). + let finalized_block_hash = + if let Some(block) = chain.fork_choice.read().get_block(&finalized_root) { + block.execution_status.block_hash() + } else { + chain + .store + .get_block(&finalized_root) + .map_err(BlockProductionError::FailedToReadFinalizedBlock)? + .ok_or(BlockProductionError::MissingFinalizedBlock(finalized_root))? 
+ .message() + .body() + .execution_payload() + .ok() + .map(|ep| ep.block_hash) + }; + + // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. + let execution_payload = execution_layer + .get_payload( + parent_hash, + timestamp, + random, + finalized_block_hash.unwrap_or_else(Hash256::zero), + ) + .await + .map_err(BlockProductionError::GetPayloadFailed)?; + + Ok(Some(execution_payload)) +} diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 8d0545c58c..3ae3bf8a3e 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,10 +1,13 @@ use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::ForkChoice; +use fork_choice::{ForkChoice, PayloadVerificationStatus}; use itertools::process_results; use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; -use state_processing::{per_block_processing, per_block_processing::BlockSignatureStrategy}; +use state_processing::{ + per_block_processing, per_block_processing::BlockSignatureStrategy, VerifyBlockRoot, +}; use std::sync::Arc; +use std::time::Duration; use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore}; use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; @@ -160,13 +163,29 @@ pub fn reset_fork_choice_to_finalization, Cold: It &block, None, BlockSignatureStrategy::NoVerification, + VerifyBlockRoot::True, spec, ) .map_err(|e| format!("Error replaying block: {:?}", e))?; + // Setting this to unverified is the safest solution, since we don't have a way to + // retro-actively determine if they were valid or not. + // + // This scenario is so rare that it seems OK to double-verify some blocks. 
+ let payload_verification_status = PayloadVerificationStatus::NotVerified; + let (block, _) = block.deconstruct(); fork_choice - .on_block(block.slot(), &block, block.canonical_root(), &state, spec) + .on_block( + block.slot(), + &block, + block.canonical_root(), + // Reward proposer boost. We are reinforcing the canonical chain. + Duration::from_secs(0), + &state, + payload_verification_status, + spec, + ) .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 2cd636f23b..768a869551 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -9,9 +9,11 @@ mod block_times_cache; mod block_verification; pub mod builder; pub mod chain_config; +mod early_attester_cache; mod errors; pub mod eth1_chain; pub mod events; +mod execution_payload; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; @@ -36,7 +38,8 @@ mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, HeadInfo, StateSkipConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + ForkChoiceError, HeadInfo, HeadSafetyStatus, StateSkipConfig, WhenSlotSkipped, + MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::chain_config::ChainConfig; @@ -44,7 +47,7 @@ pub use self::errors::{BeaconChainError, BlockProductionError}; pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; -pub use block_verification::{BlockError, GossipVerifiedBlock}; +pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBlock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; pub use 
metrics::scrape_for_metrics; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 2967d40a18..28eacad559 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -4,8 +4,12 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use lazy_static::lazy_static; pub use lighthouse_metrics::*; use slot_clock::SlotClock; +use std::time::Duration; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; +/// The maximum time to wait for the snapshot cache lock during a metrics scrape. +const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100); + lazy_static! { /* * Block Processing @@ -18,6 +22,18 @@ lazy_static! { "beacon_block_processing_successes_total", "Count of blocks processed without error" ); + pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE: Result = try_create_int_gauge( + "beacon_block_processing_snapshot_cache_size", + "Count snapshots in the snapshot cache" + ); + pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES: Result = try_create_int_counter( + "beacon_block_processing_snapshot_cache_misses", + "Count of snapshot cache misses" + ); + pub static ref BLOCK_PROCESSING_SNAPSHOT_CACHE_CLONES: Result = try_create_int_counter( + "beacon_block_processing_snapshot_cache_clones", + "Count of snapshot cache clones" + ); pub static ref BLOCK_PROCESSING_TIMES: Result = try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing"); pub static ref BLOCK_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( @@ -107,6 +123,11 @@ lazy_static! { "Number of attestations in a block" ); + pub static ref BLOCK_SIZE: Result = try_create_histogram( + "beacon_block_total_size", + "Size of a signed beacon block" + ); + /* * Unaggregated Attestation Verification */ @@ -227,6 +248,14 @@ lazy_static! 
{ pub static ref SHUFFLING_CACHE_MISSES: Result = try_create_int_counter("beacon_shuffling_cache_misses_total", "Count of times shuffling cache fulfils request"); + /* + * Early attester cache + */ + pub static ref BEACON_EARLY_ATTESTER_CACHE_HITS: Result = try_create_int_counter( + "beacon_early_attester_cache_hits", + "Count of times the early attester cache returns an attestation" + ); + /* * Attestation Production */ @@ -892,6 +921,16 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { let attestation_stats = beacon_chain.op_pool.attestation_stats(); + if let Some(snapshot_cache) = beacon_chain + .snapshot_cache + .try_write_for(SNAPSHOT_CACHE_TIMEOUT) + { + set_gauge( + &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, + snapshot_cache.len() as i64, + ) + } + set_gauge_by_usize( &OP_POOL_NUM_ATTESTATIONS, attestation_stats.num_attestations, diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index b2a925bb77..5ae7627321 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -360,13 +360,11 @@ impl, Cold: ItemStore> BackgroundMigrator= old_finalized_slot) @@ -416,7 +414,7 @@ impl, Cold: ItemStore> BackgroundMigrator DBColumn { - DBColumn::ForkChoice - } +macro_rules! 
impl_store_item { + ($type:ty) => { + impl StoreItem for $type { + fn db_column() -> DBColumn { + DBColumn::ForkChoice + } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } - fn from_store_bytes(bytes: &[u8]) -> std::result::Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } + fn from_store_bytes(bytes: &[u8]) -> std::result::Result { + Self::from_ssz_bytes(bytes).map_err(Into::into) + } + } + }; } + +impl_store_item!(PersistedForkChoiceV1); +impl_store_item!(PersistedForkChoiceV7); +impl_store_item!(PersistedForkChoiceV8); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index ec92b7c8ac..6d797ab37b 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,7 +1,14 @@ //! Utilities for managing database schema changes. -use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; +mod migration_schema_v6; +mod migration_schema_v7; +mod migration_schema_v8; +mod types; + +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}; +use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use operation_pool::{PersistedOperationPool, PersistedOperationPoolBase}; +use slog::{warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::fs; @@ -20,6 +27,7 @@ pub fn migrate_schema( datadir: &Path, from: SchemaVersion, to: SchemaVersion, + log: Logger, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to iself is always OK, a no-op. @@ -27,8 +35,8 @@ pub fn migrate_schema( // Migrate across multiple versions by recursively migrating one step at a time. 
(_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), datadir, from, next)?; - migrate_schema::(db, datadir, next, to) + migrate_schema::(db.clone(), datadir, from, next, log.clone())?; + migrate_schema::(db, datadir, next, to, log) } // Migration from v0.3.0 to v0.3.x, adding the temporary states column. // Nothing actually needs to be done, but once a DB uses v2 it shouldn't go back. @@ -93,6 +101,86 @@ pub fn migrate_schema( Ok(()) } + // Migration for adding `execution_status` field to the fork choice store. + (SchemaVersion(5), SchemaVersion(6)) => { + // Database operations to be done atomically + let mut ops = vec![]; + + // The top-level `PersistedForkChoice` struct is still V1 but will have its internal + // bytes for the fork choice updated to V6. + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(mut persisted_fork_choice) = fork_choice_opt { + migration_schema_v6::update_execution_statuses::(&mut persisted_fork_choice) + .map_err(StoreError::SchemaMigrationError)?; + + // Store the converted fork choice store under the same key. + ops.push(persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + // 1. Add `proposer_boost_root`. + // 2. Update `justified_epoch` to `justified_checkpoint` and `finalized_epoch` to + // `finalized_checkpoint`. + // 3. This migration also includes a potential update to the justified + // checkpoint in case the fork choice store's justified checkpoint and finalized checkpoint + // combination does not actually exist for any blocks in fork choice. This was possible in + // the consensus spec prior to v1.1.6. 
+ // + // Relevant issues: + // + // https://github.com/sigp/lighthouse/issues/2741 + // https://github.com/ethereum/consensus-specs/pull/2727 + // https://github.com/ethereum/consensus-specs/pull/2730 + (SchemaVersion(6), SchemaVersion(7)) => { + // Database operations to be done atomically + let mut ops = vec![]; + + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(persisted_fork_choice_v1) = fork_choice_opt { + // This migrates the `PersistedForkChoiceStore`, adding the `proposer_boost_root` field. + let mut persisted_fork_choice_v7 = persisted_fork_choice_v1.into(); + + let result = migration_schema_v7::update_fork_choice::( + &mut persisted_fork_choice_v7, + db.clone(), + ); + + // Fall back to re-initializing fork choice from an anchor state if necessary. + if let Err(e) = result { + warn!(log, "Unable to migrate to database schema 7, re-initializing fork choice"; "error" => ?e); + migration_schema_v7::update_with_reinitialized_fork_choice::( + &mut persisted_fork_choice_v7, + db.clone(), + ) + .map_err(StoreError::SchemaMigrationError)?; + } + + // Store the converted fork choice store under the same key. + ops.push(persisted_fork_choice_v7.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + // Migration to add an `epoch` key to the fork choice's balances cache. + (SchemaVersion(7), SchemaVersion(8)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = + migration_schema_v8::update_fork_choice::(fork_choice, db.clone())?; + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/README.md b/beacon_node/beacon_chain/src/schema_change/README.md new file mode 100644 index 0000000000..1a33b3c126 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/README.md @@ -0,0 +1,74 @@ +Database Schema Migrations +==== + +This document is an attempt to record some best practices and design conventions for applying +database schema migrations within Lighthouse. + +## General Structure + +If you make a breaking change to an on-disk data structure you need to increment the +`SCHEMA_VERSION` in `beacon_node/store/src/metadata.rs` and add a migration from the previous +version to the new version. + +The entry-point for database migrations is in `schema_change.rs`, _not_ `migrate.rs` (which deals +with finalization). Supporting code for a specific migration may be added in +`schema_change/migration_schema_vX.rs`, where `X` is the version being migrated _to_. + +## Combining Schema Changes + +Schema changes may be combined if they are part of the same pull request to +`unstable`. Once a schema version is defined in `unstable` we should not apply changes to it +without incrementing the version. This prevents conflicts between versions that appear to be the +same. This allows us to deploy `unstable` to nodes without having to worry about needing to resync +because of a sneaky schema change. + +Changing the on-disk structure for a version _before_ it is merged to `unstable` is OK. You will +just have to handle manually resyncing any test nodes (use checkpoint sync). + +## Naming Conventions + +Prefer to name versions of structs by _the version at which the change was introduced_. For example +if you add a field to `Foo` in v9, call the previous version `FooV1` (assuming this is `Foo`'s first +migration) and write a schema change that migrates from `FooV1` to `FooV9`. 
+ +Prefer to use explicit version names in `schema_change.rs` and the `schema_change` module. To +interface with the outside either: + +1. Define a type alias to the latest version, e.g. `pub type Foo = FooV9`, or +2. Define a mapping from the latest version to the version used elsewhere, e.g. + ```rust + impl From for Foo {} + ``` + +Avoid names like: + +* `LegacyFoo` +* `OldFoo` +* `FooWithoutX` + +## First-version vs Last-version + +Previously the schema migration code would name types by the _last_ version at which they were +valid. For example if `Foo` changed in `V9` then we would name the two variants `FooV8` and `FooV9`. +The problem with this scheme is that if `Foo` changes again in the future at say v12 then `FooV9` would +need to be renamed to `FooV11`, which is annoying. Using the _first_ valid version as described +above does not have this issue. + +## Using SuperStruct + +If possible, consider using [`superstruct`](https://crates.io/crates/superstruct) to handle data +structure changes between versions. + +* Use `superstruct(no_enum)` to avoid generating an unnecessary top-level enum. + +## Example + +A field is added to `Foo` in v9, and there are two variants: `FooV1` and `FooV9`. There is a +migration from `FooV1` to `FooV9`. `Foo` is aliased to `FooV9`. + +Some time later another field is added to `Foo` in v12. A new `FooV12` is created, along with a +migration from `FooV9` to `FooV12`. The primary `Foo` type gets re-aliased to `FooV12`. The previous +migration from V1 to V9 shouldn't break because the schema migration refers to `FooV9` explicitly +rather than `Foo`. Due to the re-aliasing (or re-mapping) the compiler will check every usage +of `Foo` to make sure that it still makes sense with `FooV12`. 
+ diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs new file mode 100644 index 0000000000..231da838cd --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v6.rs @@ -0,0 +1,28 @@ +///! These functions and structs are only relevant to the database migration from schema 5 to 6. +use crate::persisted_fork_choice::PersistedForkChoiceV1; +use crate::schema_change::types::{SszContainerV1, SszContainerV6}; +use crate::BeaconChainTypes; +use ssz::four_byte_option_impl; +use ssz::{Decode, Encode}; + +// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union +// selector. +four_byte_option_impl!(four_byte_option_usize, usize); + +pub(crate) fn update_execution_statuses( + persisted_fork_choice: &mut PersistedForkChoiceV1, +) -> Result<(), String> { + let ssz_container_v1 = + SszContainerV1::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes) + .map_err(|e| { + format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + ) + })?; + + let ssz_container_v6: SszContainerV6 = ssz_container_v1.into(); + + persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v6.as_ssz_bytes(); + Ok(()) +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs new file mode 100644 index 0000000000..ebf89ec22e --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -0,0 +1,327 @@ +///! These functions and structs are only relevant to the database migration from schema 6 to 7. 
+use crate::beacon_chain::BeaconChainTypes; +use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; +use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; +use crate::schema_change::types::{ProtoNodeV6, SszContainerV6, SszContainerV7}; +use crate::types::{Checkpoint, Epoch, Hash256}; +use crate::types::{EthSpec, Slot}; +use crate::{BeaconForkChoiceStore, BeaconSnapshot}; +use fork_choice::ForkChoice; +use proto_array::{core::ProtoNode, core::SszContainer, ProtoArrayForkChoice}; +use ssz::four_byte_option_impl; +use ssz::{Decode, Encode}; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use store::hot_cold_store::HotColdDB; +use store::iter::BlockRootsIterator; +use store::Error as StoreError; + +// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union +// selector. +four_byte_option_impl!(four_byte_option_usize, usize); + +/// This method is used to re-initialize fork choice from the finalized state in case we hit an +/// error during this migration. +pub(crate) fn update_with_reinitialized_fork_choice( + persisted_fork_choice: &mut PersistedForkChoiceV7, + db: Arc>, +) -> Result<(), String> { + let anchor_block_root = persisted_fork_choice + .fork_choice_store + .finalized_checkpoint + .root; + let anchor_block = db + .get_block(&anchor_block_root) + .map_err(|e| format!("{:?}", e))? + .ok_or_else(|| "Missing anchor beacon block".to_string())?; + let anchor_state = db + .get_state(&anchor_block.state_root(), Some(anchor_block.slot())) + .map_err(|e| format!("{:?}", e))? 
+ .ok_or_else(|| "Missing anchor beacon state".to_string())?; + let snapshot = BeaconSnapshot { + beacon_block: anchor_block, + beacon_block_root: anchor_block_root, + beacon_state: anchor_state, + }; + let store = BeaconForkChoiceStore::get_forkchoice_store(db, &snapshot); + let fork_choice = ForkChoice::from_anchor( + store, + anchor_block_root, + &snapshot.beacon_block, + &snapshot.beacon_state, + ) + .map_err(|e| format!("{:?}", e))?; + persisted_fork_choice.fork_choice = fork_choice.to_persisted(); + Ok(()) +} + +pub(crate) fn update_fork_choice( + persisted_fork_choice: &mut PersistedForkChoiceV7, + db: Arc>, +) -> Result<(), StoreError> { + // `PersistedForkChoice` stores the `ProtoArray` as a `Vec`. Deserialize these + // bytes assuming the legacy struct, and transform them to the new struct before + // re-serializing. + let ssz_container_v6 = + SszContainerV6::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes) + .map_err(|e| { + StoreError::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + // Clone the V6 proto nodes in order to maintain information about `node.justified_epoch` + // and `node.finalized_epoch`. + let nodes_v6 = ssz_container_v6.nodes.clone(); + + let justified_checkpoint = persisted_fork_choice.fork_choice_store.justified_checkpoint; + let finalized_checkpoint = persisted_fork_choice.fork_choice_store.finalized_checkpoint; + + // These transformations instantiate `node.justified_checkpoint` and `node.finalized_checkpoint` + // to `None`. 
+ let ssz_container_v7: SszContainerV7 = + ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint); + let ssz_container: SszContainer = ssz_container_v7.into(); + let mut fork_choice: ProtoArrayForkChoice = ssz_container.into(); + + update_checkpoints::(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db) + .map_err(StoreError::SchemaMigrationError)?; + + // Update the justified checkpoint in the store in case we have a discrepancy + // between the store and the proto array nodes. + update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice) + .map_err(StoreError::SchemaMigrationError)?; + + Ok(()) +} + +struct HeadInfo { + index: usize, + root: Hash256, + slot: Slot, +} + +fn update_checkpoints( + finalized_root: Hash256, + nodes_v6: &[ProtoNodeV6], + fork_choice: &mut ProtoArrayForkChoice, + db: Arc>, +) -> Result<(), String> { + let heads = find_finalized_descendant_heads(finalized_root, fork_choice); + + // For each head, first gather all epochs we will need to find justified or finalized roots for. + for head in heads { + // `relevant_epochs` are epochs for which we will need to find the root at the start slot. + // We don't need to worry about whether the are finalized or justified epochs. + let mut relevant_epochs = HashSet::new(); + let relevant_epoch_finder = |index, _: &mut ProtoNode| { + let (justified_epoch, finalized_epoch) = nodes_v6 + .get(index) + .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch)) + .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?; + relevant_epochs.insert(justified_epoch); + relevant_epochs.insert(finalized_epoch); + Ok(()) + }; + + apply_to_chain_of_ancestors( + finalized_root, + head.index, + fork_choice, + relevant_epoch_finder, + )?; + + // find the block roots associated with each relevant epoch. 
+ let roots_by_epoch = + map_relevant_epochs_to_roots::(head.root, head.slot, relevant_epochs, db.clone())?; + + // Apply this mutator to the chain of descendants from this head, adding justified + // and finalized checkpoints for each. + let node_mutator = |index, node: &mut ProtoNode| { + let (justified_epoch, finalized_epoch) = nodes_v6 + .get(index) + .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch)) + .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?; + + // Update the checkpoints only if they haven't already been populated. + if node.justified_checkpoint.is_none() { + let justified_checkpoint = + roots_by_epoch + .get(&justified_epoch) + .map(|&root| Checkpoint { + epoch: justified_epoch, + root, + }); + node.justified_checkpoint = justified_checkpoint; + } + if node.finalized_checkpoint.is_none() { + let finalized_checkpoint = + roots_by_epoch + .get(&finalized_epoch) + .map(|&root| Checkpoint { + epoch: finalized_epoch, + root, + }); + node.finalized_checkpoint = finalized_checkpoint; + } + + Ok(()) + }; + + apply_to_chain_of_ancestors(finalized_root, head.index, fork_choice, node_mutator)?; + } + Ok(()) +} + +/// Coverts the given `HashSet` to a `Vec` then reverse sorts by `Epoch`. Next, a +/// single `BlockRootsIterator` is created which is used to iterate backwards from the given +/// `head_root` and `head_slot`, finding the block root at the start slot of each epoch. +fn map_relevant_epochs_to_roots( + head_root: Hash256, + head_slot: Slot, + epochs: HashSet, + db: Arc>, +) -> Result, String> { + // Convert the `HashSet` to a `Vec` and reverse sort the epochs. + let mut relevant_epochs = epochs.into_iter().collect::>(); + relevant_epochs.sort_unstable_by(|a, b| b.cmp(a)); + + // Iterate backwards from the given `head_root` and `head_slot` and find the block root at each epoch. 
+ let mut iter = std::iter::once(Ok((head_root, head_slot))) + .chain(BlockRootsIterator::from_block(&db, head_root).map_err(|e| format!("{:?}", e))?); + let mut roots_by_epoch = HashMap::new(); + for epoch in relevant_epochs { + let start_slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); + + let root = iter + .find_map(|next| match next { + Ok((root, slot)) => (slot == start_slot).then(|| Ok(root)), + Err(e) => Some(Err(format!("{:?}", e))), + }) + .transpose()? + .ok_or_else(|| "Justified root not found".to_string())?; + roots_by_epoch.insert(epoch, root); + } + Ok(roots_by_epoch) +} + +/// Applies a mutator to every node in a chain, starting from the node at the given +/// `head_index` and iterating through ancestors until the `finalized_root` is reached. +fn apply_to_chain_of_ancestors( + finalized_root: Hash256, + head_index: usize, + fork_choice: &mut ProtoArrayForkChoice, + mut node_mutator: F, +) -> Result<(), String> +where + F: FnMut(usize, &mut ProtoNode) -> Result<(), String>, +{ + let head = fork_choice + .core_proto_array_mut() + .nodes + .get_mut(head_index) + .ok_or_else(|| "Head index not found in proto nodes".to_string())?; + + node_mutator(head_index, head)?; + + let mut parent_index_opt = head.parent; + let mut parent_opt = + parent_index_opt.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index)); + + // Iterate backwards through all parents until there is no reference to a parent or we reach + // the `finalized_root` node. + while let (Some(parent), Some(parent_index)) = (parent_opt, parent_index_opt) { + node_mutator(parent_index, parent)?; + + // Break out of this while loop *after* the `node_mutator` has been applied to the finalized + // node. 
+ if parent.root == finalized_root { + break; + } + + // Update parent values + parent_index_opt = parent.parent; + parent_opt = parent_index_opt + .and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index)); + } + Ok(()) +} + +/// Finds all heads by finding all nodes in the proto array that are not referenced as parents. Then +/// checks that these nodes are descendants of the finalized root in order to determine if they are +/// relevant. +fn find_finalized_descendant_heads( + finalized_root: Hash256, + fork_choice: &ProtoArrayForkChoice, +) -> Vec { + let nodes_referenced_as_parents: HashSet = fork_choice + .core_proto_array() + .nodes + .iter() + .filter_map(|node| node.parent) + .collect::>(); + + fork_choice + .core_proto_array() + .nodes + .iter() + .enumerate() + .filter_map(|(index, node)| { + (!nodes_referenced_as_parents.contains(&index) + && fork_choice.is_descendant(finalized_root, node.root)) + .then(|| HeadInfo { + index, + root: node.root, + slot: node.slot, + }) + }) + .collect::>() +} + +fn update_store_justified_checkpoint( + persisted_fork_choice: &mut PersistedForkChoiceV7, + fork_choice: &mut ProtoArrayForkChoice, +) -> Result<(), String> { + let justified_checkpoint = fork_choice + .core_proto_array() + .nodes + .iter() + .filter_map(|node| { + (node.finalized_checkpoint + == Some(persisted_fork_choice.fork_choice_store.finalized_checkpoint)) + .then(|| node.justified_checkpoint) + .flatten() + }) + .max_by_key(|justified_checkpoint| justified_checkpoint.epoch) + .ok_or("Proto node with current finalized checkpoint not found")?; + + fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint; + persisted_fork_choice.fork_choice.proto_array_bytes = fork_choice.as_bytes(); + persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; + Ok(()) +} + +// Add a zero `proposer_boost_root` when migrating from V1-6 to V7. 
+impl From for PersistedForkChoiceStoreV7 { + fn from(other: PersistedForkChoiceStoreV1) -> Self { + Self { + balances_cache: other.balances_cache, + time: other.time, + finalized_checkpoint: other.finalized_checkpoint, + justified_checkpoint: other.justified_checkpoint, + justified_balances: other.justified_balances, + best_justified_checkpoint: other.best_justified_checkpoint, + proposer_boost_root: Hash256::zero(), + } + } +} + +impl From for PersistedForkChoiceV7 { + fn from(other: PersistedForkChoiceV1) -> Self { + Self { + fork_choice: other.fork_choice, + fork_choice_store: other.fork_choice_store.into(), + } + } +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs new file mode 100644 index 0000000000..5998eaa125 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs @@ -0,0 +1,50 @@ +use crate::beacon_chain::BeaconChainTypes; +use crate::beacon_fork_choice_store::{ + BalancesCacheV8, CacheItemV8, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, +}; +use crate::persisted_fork_choice::{PersistedForkChoiceV7, PersistedForkChoiceV8}; +use std::sync::Arc; +use store::{Error as StoreError, HotColdDB}; +use types::EthSpec; + +pub fn update_fork_choice( + fork_choice: PersistedForkChoiceV7, + db: Arc>, +) -> Result { + let PersistedForkChoiceStoreV7 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + proposer_boost_root, + } = fork_choice.fork_choice_store; + let mut fork_choice_store = PersistedForkChoiceStoreV8 { + balances_cache: BalancesCacheV8::default(), + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + proposer_boost_root, + }; + + // Add epochs to the balances cache. It's safe to just use the block's epoch because + // before schema v8 the cache would always miss on skipped slots. 
+ for item in balances_cache.items { + // Drop any blocks that aren't found, they're presumably too old and this is only a cache. + if let Some(block) = db.get_block(&item.block_root)? { + fork_choice_store.balances_cache.items.push(CacheItemV8 { + block_root: item.block_root, + epoch: block.slot().epoch(T::EthSpec::slots_per_epoch()), + balances: item.balances, + }); + } + } + + Ok(PersistedForkChoiceV8 { + fork_choice: fork_choice.fork_choice, + fork_choice_store, + }) +} diff --git a/beacon_node/beacon_chain/src/schema_change/types.rs b/beacon_node/beacon_chain/src/schema_change/types.rs new file mode 100644 index 0000000000..8d41a384f6 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/types.rs @@ -0,0 +1,192 @@ +use crate::types::{AttestationShufflingId, Checkpoint, Epoch, Hash256, Slot}; +use proto_array::core::{ProposerBoost, ProtoNode, SszContainer, VoteTracker}; +use proto_array::ExecutionStatus; +use ssz::four_byte_option_impl; +use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use superstruct::superstruct; + +// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union +// selector. 
+four_byte_option_impl!(four_byte_option_usize, usize); +four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); + +#[superstruct( + variants(V1, V6, V7), + variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)), + no_enum +)] +pub struct ProtoNode { + pub slot: Slot, + pub state_root: Hash256, + pub target_root: Hash256, + pub current_epoch_shuffling_id: AttestationShufflingId, + pub next_epoch_shuffling_id: AttestationShufflingId, + pub root: Hash256, + #[ssz(with = "four_byte_option_usize")] + pub parent: Option, + #[superstruct(only(V1, V6))] + pub justified_epoch: Epoch, + #[superstruct(only(V1, V6))] + pub finalized_epoch: Epoch, + #[ssz(with = "four_byte_option_checkpoint")] + #[superstruct(only(V7))] + pub justified_checkpoint: Option, + #[ssz(with = "four_byte_option_checkpoint")] + #[superstruct(only(V7))] + pub finalized_checkpoint: Option, + pub weight: u64, + #[ssz(with = "four_byte_option_usize")] + pub best_child: Option, + #[ssz(with = "four_byte_option_usize")] + pub best_descendant: Option, + #[superstruct(only(V6, V7))] + pub execution_status: ExecutionStatus, +} + +impl Into for ProtoNodeV1 { + fn into(self) -> ProtoNodeV6 { + ProtoNodeV6 { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_epoch: self.justified_epoch, + finalized_epoch: self.finalized_epoch, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + // We set the following execution value as if the block is a pre-merge-fork block. This + // is safe as long as we never import a merge block with the old version of proto-array. + // This will be safe since we can't actually process merge blocks until we've made this + // change to fork choice. 
+ execution_status: ExecutionStatus::irrelevant(), + } + } +} + +impl Into for ProtoNodeV6 { + fn into(self) -> ProtoNodeV7 { + ProtoNodeV7 { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: None, + finalized_checkpoint: None, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + } + } +} + +impl Into for ProtoNodeV7 { + fn into(self) -> ProtoNode { + ProtoNode { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + } + } +} + +#[superstruct( + variants(V1, V6, V7), + variant_attributes(derive(Encode, Decode)), + no_enum +)] +#[derive(Encode, Decode)] +pub struct SszContainer { + pub votes: Vec, + pub balances: Vec, + pub prune_threshold: usize, + #[superstruct(only(V1, V6))] + pub justified_epoch: Epoch, + #[superstruct(only(V1, V6))] + pub finalized_epoch: Epoch, + #[superstruct(only(V7))] + pub justified_checkpoint: Checkpoint, + #[superstruct(only(V7))] + pub finalized_checkpoint: Checkpoint, + #[superstruct(only(V1))] + pub nodes: Vec, + #[superstruct(only(V6))] + pub nodes: Vec, + #[superstruct(only(V7))] + pub nodes: Vec, + pub indices: Vec<(Hash256, usize)>, + #[superstruct(only(V7))] + pub previous_proposer_boost: ProposerBoost, +} + +impl Into for SszContainerV1 { + fn into(self) -> SszContainerV6 { + let nodes = 
self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV6 { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_epoch: self.justified_epoch, + finalized_epoch: self.finalized_epoch, + nodes, + indices: self.indices, + } + } +} + +impl SszContainerV6 { + pub(crate) fn into_ssz_container_v7( + self, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + ) -> SszContainerV7 { + let nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV7 { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint, + finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: ProposerBoost::default(), + } + } +} + +impl Into for SszContainerV7 { + fn into(self) -> SszContainer { + let nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainer { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + } + } +} diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs index e273c35218..f4bbae8a32 100644 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ b/beacon_node/beacon_chain/src/snapshot_cache.rs @@ -1,12 +1,19 @@ use crate::BeaconSnapshot; +use itertools::process_results; use std::cmp; +use std::time::Duration; use types::{ - beacon_state::CloneConfig, BeaconState, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + beacon_state::CloneConfig, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock, + Slot, }; /// The default size of the cache. pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4; +/// The minimum block delay to clone the state in the cache instead of removing it. 
+/// This helps keep block processing fast during re-orgs from late blocks. +const MINIMUM_BLOCK_DELAY_FOR_CLONE: Duration = Duration::from_secs(6); + /// This snapshot is to be used for verifying a child of `self.beacon_block`. #[derive(Debug)] pub struct PreProcessingSnapshot { @@ -62,6 +69,22 @@ impl CacheItem { beacon_state_root, } } + + pub fn clone_as_pre_state(&self) -> PreProcessingSnapshot { + // Do not include the beacon state root if the state has been advanced. + let beacon_state_root = + Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); + + PreProcessingSnapshot { + beacon_block: self.beacon_block.clone(), + beacon_block_root: self.beacon_block_root, + pre_state: self + .pre_state + .as_ref() + .map_or_else(|| self.beacon_state.clone(), |pre_state| pre_state.clone()), + beacon_state_root, + } + } } /// The information required for block production. @@ -142,9 +165,25 @@ impl SnapshotCache { } } + /// The block roots of all snapshots contained in `self`. + pub fn beacon_block_roots(&self) -> Vec { + self.snapshots.iter().map(|s| s.beacon_block_root).collect() + } + + /// The number of snapshots contained in `self`. + pub fn len(&self) -> usize { + self.snapshots.len() + } + /// Insert a snapshot, potentially removing an existing snapshot if `self` is at capacity (see /// struct-level documentation for more info). - pub fn insert(&mut self, snapshot: BeaconSnapshot, pre_state: Option>) { + pub fn insert( + &mut self, + snapshot: BeaconSnapshot, + pre_state: Option>, + spec: &ChainSpec, + ) { + let parent_root = snapshot.beacon_block.message().parent_root(); let item = CacheItem { beacon_block: snapshot.beacon_block, beacon_block_root: snapshot.beacon_block_root, @@ -152,6 +191,25 @@ impl SnapshotCache { pre_state, }; + // Remove the grandparent of the block that was just inserted. 
+ // + // Assuming it's unlikely to see re-orgs deeper than one block, this method helps keep the + // cache small by removing any states that already have more than one descendant. + // + // Remove the grandparent first to free up room in the cache. + let grandparent_result = + process_results(item.beacon_state.rev_iter_block_roots(spec), |iter| { + iter.map(|(_slot, root)| root) + .find(|root| *root != item.beacon_block_root && *root != parent_root) + }); + if let Ok(Some(grandparent_root)) = grandparent_result { + let head_block_root = self.head_block_root; + self.snapshots.retain(|snapshot| { + let root = snapshot.beacon_block_root; + root == head_block_root || root != grandparent_root + }); + } + if self.snapshots.len() < self.max_len { self.snapshots.push(item); } else { @@ -178,11 +236,36 @@ impl SnapshotCache { /// If available, returns a `CacheItem` that should be used for importing/processing a block. /// The method will remove the block from `self`, carrying across any caches that may or may not /// be built. - pub fn get_state_for_block_processing(&mut self, block_root: Hash256) -> Option> { + /// + /// In the event the block being processed was observed late, clone the cache instead of + /// moving it. This allows us to process the next block quickly in the case of a re-org. + /// Additionally, if the slot was skipped, clone the cache. This ensures blocks that are + /// later than 1 slot still have access to the cache and can be processed quickly. 
+ pub fn get_state_for_block_processing( + &mut self, + block_root: Hash256, + block_slot: Slot, + block_delay: Option, + spec: &ChainSpec, + ) -> Option<(PreProcessingSnapshot, bool)> { self.snapshots .iter() .position(|snapshot| snapshot.beacon_block_root == block_root) - .map(|i| self.snapshots.remove(i)) + .map(|i| { + if let Some(cache) = self.snapshots.get(i) { + if block_slot > cache.beacon_block.slot() + 1 { + return (cache.clone_as_pre_state(), true); + } + if let Some(delay) = block_delay { + if delay >= MINIMUM_BLOCK_DELAY_FOR_CLONE + && delay <= Duration::from_secs(spec.seconds_per_slot) * 4 + { + return (cache.clone_as_pre_state(), true); + } + } + } + (self.snapshots.remove(i).into_pre_state(), false) + }) } /// If available, obtains a clone of a `BeaconState` that should be used for block production. @@ -320,6 +403,7 @@ mod test { #[test] fn insert_get_prune_update() { + let spec = MainnetEthSpec::default_spec(); let mut cache = SnapshotCache::new(CACHE_SIZE, get_snapshot(0)); // Insert a bunch of entries in the cache. 
It should look like this: @@ -336,7 +420,7 @@ mod test { *snapshot.beacon_state.slot_mut() = Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1); - cache.insert(snapshot, None); + cache.insert(snapshot, None, &spec); assert_eq!( cache.snapshots.len(), @@ -354,12 +438,17 @@ mod test { // 2 2 // 3 3 assert_eq!(cache.snapshots.len(), CACHE_SIZE); - cache.insert(get_snapshot(42), None); + cache.insert(get_snapshot(42), None, &spec); assert_eq!(cache.snapshots.len(), CACHE_SIZE); assert!( cache - .get_state_for_block_processing(Hash256::from_low_u64_be(1)) + .get_state_for_block_processing( + Hash256::from_low_u64_be(1), + Slot::new(0), + None, + &spec + ) .is_none(), "the snapshot with the lowest slot should have been removed during the insert function" ); @@ -377,8 +466,14 @@ mod test { ); assert_eq!( cache - .get_state_for_block_processing(Hash256::from_low_u64_be(0)) + .get_state_for_block_processing( + Hash256::from_low_u64_be(0), + Slot::new(0), + None, + &spec + ) .expect("the head should still be in the cache") + .0 .beacon_block_root, Hash256::from_low_u64_be(0), "get_state_for_block_processing should get the correct snapshot" @@ -403,14 +498,20 @@ mod test { // Over-fill the cache so it needs to eject some old values on insert. for i in 0..CACHE_SIZE as u64 { - cache.insert(get_snapshot(u64::max_value() - i), None); + cache.insert(get_snapshot(u64::max_value() - i), None, &spec); } // Ensure that the new head value was not removed from the cache. 
assert_eq!( cache - .get_state_for_block_processing(Hash256::from_low_u64_be(2)) + .get_state_for_block_processing( + Hash256::from_low_u64_be(2), + Slot::new(0), + None, + &spec + ) .expect("the new head should still be in the cache") + .0 .beacon_block_root, Hash256::from_low_u64_be(2), "get_state_for_block_processing should get the correct snapshot" diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 947e8c38e0..6a3c3ea00e 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -41,9 +41,17 @@ const MAX_ADVANCE_DISTANCE: u64 = 4; enum Error { BeaconChain(BeaconChainError), HeadMissingFromSnapshotCache(Hash256), - MaxDistanceExceeded { current_slot: Slot, head_slot: Slot }, - StateAlreadyAdvanced { block_root: Hash256 }, - BadStateSlot { state_slot: Slot, block_slot: Slot }, + MaxDistanceExceeded { + current_slot: Slot, + head_slot: Slot, + }, + StateAlreadyAdvanced { + block_root: Hash256, + }, + BadStateSlot { + _state_slot: Slot, + _block_slot: Slot, + }, } impl From for Error { @@ -224,8 +232,8 @@ fn advance_head( // Advancing more than one slot without storing the intermediate state would corrupt the // database. Future works might store temporary, intermediate states inside this function. 
return Err(Error::BadStateSlot { - block_slot: head_slot, - state_slot: state.slot(), + _block_slot: head_slot, + _state_slot: state.slot(), }); }; diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 4bc5b439e1..fa7d4dcfed 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -273,7 +273,7 @@ impl VerifiedSyncContribution { let subcommittee_index = contribution.subcommittee_index as usize; // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance. - verify_propagation_slot_range(chain, contribution)?; + verify_propagation_slot_range(&chain.slot_clock, contribution)?; // Validate subcommittee index. if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT { @@ -428,7 +428,7 @@ impl VerifiedSyncCommitteeMessage { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future sync committee messages for later processing. - verify_propagation_slot_range(chain, &sync_message)?; + verify_propagation_slot_range(&chain.slot_clock, &sync_message)?; // Ensure the `subnet_id` is valid for the given validator. let pubkey = chain @@ -516,14 +516,13 @@ impl VerifiedSyncCommitteeMessage { /// to the current slot of the `chain`. /// /// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. 
-pub fn verify_propagation_slot_range( - chain: &BeaconChain, +pub fn verify_propagation_slot_range( + slot_clock: &S, sync_contribution: &U, ) -> Result<(), Error> { let message_slot = sync_contribution.get_slot(); - let latest_permissible_slot = chain - .slot_clock + let latest_permissible_slot = slot_clock .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; if message_slot > latest_permissible_slot { @@ -533,8 +532,7 @@ pub fn verify_propagation_slot_range( }); } - let earliest_permissible_slot = chain - .slot_clock + let earliest_permissible_slot = slot_clock .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .ok_or(BeaconChainError::UnableToReadSlot)?; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d407d83542..574895296d 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -11,37 +11,45 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; +use execution_layer::{ + test_utils::{ + ExecutionBlockGenerator, ExecutionLayerRuntime, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK, + }, + ExecutionLayer, +}; use futures::channel::mpsc::Receiver; -pub use genesis::interop_genesis_state; +pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use logging::test_logger; use merkle_proof::MerkleTree; use parking_lot::Mutex; +use parking_lot::RwLockWriteGuard; use rand::rngs::StdRng; use rand::Rng; use rand::SeedableRng; use rayon::prelude::*; +use sensitive_url::SensitiveUrl; use slog::Logger; use slot_clock::TestingSlotClock; -use state_processing::state_advance::complete_state_advance; +use state_processing::{state_advance::complete_state_advance, StateRootStrategy}; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use store::{config::StoreConfig, 
BlockReplay, HotColdDB, ItemStore, LevelDB, MemoryStore}; +use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; use task_executor::ShutdownReason; use tree_hash::TreeHash; use types::sync_selection_proof::SyncSelectionProof; pub use types::test_utils::generate_deterministic_keypairs; use types::{ - typenum::U4294967296, AggregateSignature, Attestation, AttestationData, AttesterSlashing, - BeaconBlock, BeaconState, BeaconStateHash, ChainSpec, Checkpoint, Deposit, DepositData, Domain, - Epoch, EthSpec, ForkName, Graffiti, Hash256, IndexedAttestation, Keypair, ProposerSlashing, - PublicKeyBytes, SelectionProof, SignatureBytes, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockHash, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, Slot, - SubnetId, SyncCommittee, SyncCommitteeContribution, SyncCommitteeMessage, VariableList, - VoluntaryExit, + typenum::U4294967296, Address, AggregateSignature, Attestation, AttestationData, + AttesterSlashing, BeaconBlock, BeaconState, BeaconStateHash, ChainSpec, Checkpoint, Deposit, + DepositData, Domain, Epoch, EthSpec, ForkName, Graffiti, Hash256, IndexedAttestation, Keypair, + ProposerSlashing, PublicKeyBytes, SelectionProof, SignatureBytes, SignedAggregateAndProof, + SignedBeaconBlock, SignedBeaconBlockHash, SignedContributionAndProof, SignedRoot, + SignedVoluntaryExit, Slot, SubnetId, SyncCommittee, SyncCommitteeContribution, + SyncCommitteeMessage, VariableList, VoluntaryExit, }; // 4th September 2019 @@ -147,6 +155,9 @@ pub struct Builder { store: Option>>, initial_mutator: Option>, store_mutator: Option>, + execution_layer: Option, + execution_layer_runtime: Option, + mock_execution_layer: Option>, log: Logger, } @@ -170,6 +181,8 @@ impl Builder> { let genesis_state = interop_genesis_state::( &validator_keypairs, HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, builder.get_spec(), ) .expect("should generate interop state"); @@ -215,6 +228,8 @@ impl 
Builder> { let genesis_state = interop_genesis_state::( &validator_keypairs, HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, builder.get_spec(), ) .expect("should generate interop state"); @@ -254,6 +269,9 @@ where store: None, initial_mutator: None, store_mutator: None, + execution_layer: None, + mock_execution_layer: None, + execution_layer_runtime: None, log: test_logger(), } } @@ -311,6 +329,56 @@ where self } + pub fn execution_layer(mut self, urls: &[&str]) -> Self { + assert!( + self.execution_layer.is_none(), + "execution layer already defined" + ); + + let el_runtime = ExecutionLayerRuntime::default(); + + let urls = urls + .iter() + .map(|s| SensitiveUrl::parse(*s)) + .collect::>() + .unwrap(); + let execution_layer = ExecutionLayer::from_urls( + urls, + Some(Address::repeat_byte(42)), + el_runtime.task_executor.clone(), + el_runtime.log.clone(), + ) + .unwrap(); + + self.execution_layer = Some(execution_layer); + self.execution_layer_runtime = Some(el_runtime); + self + } + + pub fn mock_execution_layer(mut self) -> Self { + let spec = self.spec.clone().expect("cannot build without spec"); + let mock = MockExecutionLayer::new( + spec.terminal_total_difficulty, + DEFAULT_TERMINAL_BLOCK, + spec.terminal_block_hash, + spec.terminal_block_hash_activation_epoch, + ); + self.execution_layer = Some(mock.el.clone()); + self.mock_execution_layer = Some(mock); + self + } + + /// Instruct the mock execution engine to always return a "valid" response to any payload it is + /// asked to execute. 
+ pub fn mock_execution_layer_all_payloads_valid(self) -> Self { + self.mock_execution_layer + .as_ref() + .expect("requires mock execution layer") + .server + .all_payloads_valid(); + self + } + pub fn build(self) -> BeaconChainHarness> { let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); @@ -326,6 +394,7 @@ where .custom_spec(spec) .store(self.store.expect("cannot build without store")) .store_migrator_config(MigratorConfig::default().blocking()) + .execution_layer(self.execution_layer) .dummy_eth1_backend() .expect("should build dummy backend") .shutdown_sender(shutdown_tx) @@ -364,6 +433,8 @@ where chain: Arc::new(chain), validator_keypairs, shutdown_receiver, + mock_execution_layer: self.mock_execution_layer, + execution_layer_runtime: self.execution_layer_runtime, rng: make_rng(), } } @@ -380,6 +451,9 @@ pub struct BeaconChainHarness { pub spec: ChainSpec, pub shutdown_receiver: Receiver, + pub mock_execution_layer: Option>, + pub execution_layer_runtime: Option, + pub rng: Mutex, } @@ -407,6 +481,14 @@ where &self.chain.log } + pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator> { + self.mock_execution_layer + .as_ref() + .expect("harness was not built with mock execution layer") + .server + .execution_block_generator() + } + pub fn get_all_validators(&self) -> Vec { (0..self.validator_keypairs.len()).collect() } @@ -445,7 +527,7 @@ where pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option> { self.chain .store - .load_hot_state(&state_hash.into(), BlockReplay::Accurate) + .load_hot_state(&state_hash.into(), StateRootStrategy::Accurate) .unwrap() } @@ -1436,6 +1518,40 @@ where self.make_block(state, slot) } + /// Uses `Self::extend_chain` to build the chain out to the `target_slot`. 
+ pub fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + self.advance_slot(); + } + + let num_slots = target_slot + .as_usize() + .checked_sub(self.chain.slot().unwrap().as_usize()) + .expect("target_slot must be >= current_slot") + .checked_add(1) + .unwrap(); + + self.extend_slots(num_slots) + } + + /// Uses `Self::extend_chain` to `num_slots` blocks. + /// + /// Utilizes: + /// + /// - BlockStrategy::OnCanonicalHead, + /// - AttestationStrategy::AllValidators, + pub fn extend_slots(&self, num_slots: usize) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + self.advance_slot(); + } + + self.extend_chain( + num_slots, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + } + /// Deprecated: Use add_attested_blocks_at_slots() instead /// /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index af89339c0c..e9649b6114 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -246,8 +246,8 @@ enum Error { /// The file read from disk does not have a contiguous list of validator public keys. The file /// has become corrupted. 
InconsistentIndex { - expected: Option, - found: usize, + _expected: Option, + _found: usize, }, } @@ -299,8 +299,8 @@ impl ValidatorPubkeyCacheFile { indices.insert(pubkey, index); } else { return Err(Error::InconsistentIndex { - expected, - found: index, + _expected: expected, + _found: index, }); } } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index d2f564146d..4d862cbac7 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -1,10 +1,8 @@ #![cfg(not(debug_assertions))] -#[macro_use] -extern crate lazy_static; - use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::{StateSkipConfig, WhenSlotSkipped}; +use lazy_static::lazy_static; use tree_hash::TreeHash; use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot}; @@ -124,6 +122,24 @@ fn produces_attestations() { ); assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch"); assert_eq!(data.target.root, target_root, "bad target root"); + + let early_attestation = { + let proto_block = chain.fork_choice.read().get_block(&block_root).unwrap(); + chain + .early_attester_cache + .add_head_block(block_root, block.clone(), proto_block, &state, &chain.spec) + .unwrap(); + chain + .early_attester_cache + .try_attest(slot, index, &chain.spec) + .unwrap() + .unwrap() + }; + + assert_eq!( + attestation, early_attestation, + "early attester cache inconsistent" + ); } } } diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 45d316e3d3..f5942a2be2 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1,8 +1,5 @@ #![cfg(not(debug_assertions))] -#[macro_use] -extern crate lazy_static; - use 
beacon_chain::{ attestation_verification::Error as AttnError, test_utils::{ @@ -11,6 +8,7 @@ use beacon_chain::{ BeaconChain, BeaconChainTypes, WhenSlotSkipped, }; use int_to_bytes::int_to_bytes32; +use lazy_static::lazy_static; use state_processing::{ per_block_processing::errors::AttestationValidationError, per_slot_processing, }; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 93b303c268..567e0cdb72 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,18 +1,16 @@ #![cfg(not(debug_assertions))] -#[macro_use] -extern crate lazy_static; - use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult}; +use lazy_static::lazy_static; use logging::test_logger; use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ common::get_indexed_attestation, per_block_processing::{per_block_processing, BlockSignatureStrategy}, - per_slot_processing, BlockProcessingError, + per_slot_processing, BlockProcessingError, VerifyBlockRoot, }; use std::sync::Arc; use tempfile::tempdir; @@ -832,11 +830,7 @@ fn block_gossip_verification() { fn verify_block_for_gossip_slashing_detection() { let slasher_dir = tempdir().unwrap(); let slasher = Arc::new( - Slasher::open( - SlasherConfig::new(slasher_dir.path().into()).for_testing(), - test_logger(), - ) - .unwrap(), + Slasher::open(SlasherConfig::new(slasher_dir.path().into()), test_logger()).unwrap(), ); let inner_slasher = slasher.clone(); @@ -980,6 +974,7 @@ fn add_base_block_to_altair_chain() { &base_block, None, BlockSignatureStrategy::NoVerification, + VerifyBlockRoot::True, &harness.chain.spec, ), Err(BlockProcessingError::InconsistentBlockFork( @@ -1098,6 +1093,7 @@ fn add_altair_block_to_base_chain() { &altair_block, None, 
BlockSignatureStrategy::NoVerification, + VerifyBlockRoot::True, &harness.chain.spec, ), Err(BlockProcessingError::InconsistentBlockFork( diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs new file mode 100644 index 0000000000..fa31af8406 --- /dev/null +++ b/beacon_node/beacon_chain/tests/main.rs @@ -0,0 +1,8 @@ +mod attestation_production; +mod attestation_verification; +mod block_verification; +mod merge; +mod op_verification; +mod store_tests; +mod sync_committee_verification; +mod tests; diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs new file mode 100644 index 0000000000..43ee2372b6 --- /dev/null +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -0,0 +1,182 @@ +#![cfg(not(debug_assertions))] // Tests run too slow in debug. + +use beacon_chain::test_utils::BeaconChainHarness; +use execution_layer::test_utils::{generate_pow_block, DEFAULT_TERMINAL_BLOCK}; +use types::*; + +const VALIDATOR_COUNT: usize = 32; + +type E = MainnetEthSpec; + +fn verify_execution_payload_chain(chain: &[ExecutionPayload]) { + let mut prev_ep: Option> = None; + + for ep in chain { + assert!(*ep != ExecutionPayload::default()); + assert!(ep.block_hash != Hash256::zero()); + + // Check against previous `ExecutionPayload`. + if let Some(prev_ep) = prev_ep { + assert_eq!(prev_ep.block_hash, ep.parent_hash); + assert_eq!(prev_ep.block_number + 1, ep.block_number); + } + prev_ep = Some(ep.clone()); + } +} + +#[test] +// TODO(merge): This isn't working cause the non-zero values in `initialize_beacon_state_from_eth1` +// are causing failed lookups to the execution node. I need to come back to this. 
+#[should_panic] +fn merge_with_terminal_block_hash_override() { + let altair_fork_epoch = Epoch::new(0); + let bellatrix_fork_epoch = Epoch::new(0); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + + let genesis_pow_block_hash = generate_pow_block( + spec.terminal_total_difficulty, + DEFAULT_TERMINAL_BLOCK, + 0, + Hash256::zero(), + ) + .unwrap() + .block_hash; + + spec.terminal_block_hash = genesis_pow_block_hash; + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + assert_eq!( + harness + .execution_block_generator() + .latest_block() + .unwrap() + .block_hash(), + genesis_pow_block_hash, + "pre-condition" + ); + + assert!( + harness + .chain + .head() + .unwrap() + .beacon_block + .as_merge() + .is_ok(), + "genesis block should be a merge block" + ); + + let mut execution_payloads = vec![]; + for i in 0..E::slots_per_epoch() * 3 { + harness.extend_slots(1); + + let block = harness.chain.head().unwrap().beacon_block; + + let execution_payload = block.message().body().execution_payload().unwrap().clone(); + if i == 0 { + assert_eq!(execution_payload.block_hash, genesis_pow_block_hash); + } + execution_payloads.push(execution_payload); + } + + verify_execution_payload_chain(&execution_payloads); +} + +#[test] +fn base_altair_merge_with_terminal_block_after_fork() { + let altair_fork_epoch = Epoch::new(4); + let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); + let bellatrix_fork_epoch = Epoch::new(8); + let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch()); + + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(altair_fork_epoch); + spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); + + let mut execution_payloads = vec![]; + + let harness = 
BeaconChainHarness::builder(E::default()) + .spec(spec) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + /* + * Start with the base fork. + */ + + assert!(harness.chain.head().unwrap().beacon_block.as_base().is_ok()); + + /* + * Do the Altair fork. + */ + + harness.extend_to_slot(altair_fork_slot); + + let altair_head = harness.chain.head().unwrap().beacon_block; + assert!(altair_head.as_altair().is_ok()); + assert_eq!(altair_head.slot(), altair_fork_slot); + + /* + * Do the merge fork, without a terminal PoW block. + */ + + harness.extend_to_slot(merge_fork_slot); + + let merge_head = harness.chain.head().unwrap().beacon_block; + assert!(merge_head.as_merge().is_ok()); + assert_eq!(merge_head.slot(), merge_fork_slot); + assert_eq!( + *merge_head.message().body().execution_payload().unwrap(), + ExecutionPayload::default() + ); + + /* + * Next merge block shouldn't include an exec payload. + */ + + harness.extend_slots(1); + + let one_after_merge_head = harness.chain.head().unwrap().beacon_block; + assert_eq!( + *one_after_merge_head + .message() + .body() + .execution_payload() + .unwrap(), + ExecutionPayload::default() + ); + assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); + + /* + * Trigger the terminal PoW block. + */ + + harness + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + + /* + * Next merge block should include an exec payload. 
+ */ + + for _ in 0..4 { + harness.extend_slots(1); + + let block = harness.chain.head().unwrap().beacon_block; + execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); + } + + verify_execution_payload_chain(&execution_payloads); +} diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index 56e76cffe5..ec22a4804a 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -2,13 +2,11 @@ #![cfg(not(debug_assertions))] -#[macro_use] -extern crate lazy_static; - use beacon_chain::observed_operations::ObservationOutcome; use beacon_chain::test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; +use lazy_static::lazy_static; use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; use store::{LevelDB, StoreConfig}; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index f9af16bbe7..5c020df492 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -14,6 +14,7 @@ use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; use rand::Rng; +use state_processing::BlockReplayer; use std::collections::HashMap; use std::collections::HashSet; use std::convert::TryInto; @@ -126,7 +127,7 @@ fn randomised_skips() { "head should be at the current slot" ); - check_split_slot(&harness, store); + check_split_slot(&harness, store.clone()); check_chain_dump(&harness, num_blocks_produced + 1); check_iterators(&harness); } @@ -358,6 +359,191 @@ fn epoch_boundary_state_attestation_processing() { assert!(checked_pre_fin); } +// Test that the `end_slot` for forwards block and state root iterators works correctly. 
+#[test] +fn forwards_iter_block_and_state_roots_until() { + let num_blocks_produced = E::slots_per_epoch() * 17; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let all_validators = &harness.get_all_validators(); + let (mut head_state, mut head_state_root) = harness.get_current_state_and_root(); + let head_block_root = harness.chain.head_info().unwrap().block_root; + let mut block_roots = vec![head_block_root]; + let mut state_roots = vec![head_state_root]; + + for slot in (1..=num_blocks_produced).map(Slot::from) { + let (block_root, mut state) = harness + .add_attested_block_at_slot(slot, head_state, head_state_root, all_validators) + .unwrap(); + head_state_root = state.update_tree_hash_cache().unwrap(); + head_state = state; + block_roots.push(block_root.into()); + state_roots.push(head_state_root); + } + + check_finalization(&harness, num_blocks_produced); + check_split_slot(&harness, store.clone()); + + // The last restore point slot is the point at which the hybrid forwards iterator behaviour + // changes. 
+ let last_restore_point_slot = store.get_latest_restore_point_slot(); + assert!(last_restore_point_slot > 0); + + let chain = &harness.chain; + let head_state = harness.get_current_state(); + let head_slot = head_state.slot(); + assert_eq!(head_slot, num_blocks_produced); + + let test_range = |start_slot: Slot, end_slot: Slot| { + let mut block_root_iter = chain + .forwards_iter_block_roots_until(start_slot, end_slot) + .unwrap(); + let mut state_root_iter = chain + .forwards_iter_state_roots_until(start_slot, end_slot) + .unwrap(); + + for slot in (start_slot.as_u64()..=end_slot.as_u64()).map(Slot::new) { + let block_root = block_roots[slot.as_usize()]; + assert_eq!(block_root_iter.next().unwrap().unwrap(), (block_root, slot)); + + let state_root = state_roots[slot.as_usize()]; + assert_eq!(state_root_iter.next().unwrap().unwrap(), (state_root, slot)); + } + }; + + let split_slot = store.get_split_slot(); + assert!(split_slot > last_restore_point_slot); + + test_range(Slot::new(0), last_restore_point_slot); + test_range(last_restore_point_slot, last_restore_point_slot); + test_range(last_restore_point_slot - 1, last_restore_point_slot); + test_range(Slot::new(0), last_restore_point_slot - 1); + test_range(Slot::new(0), split_slot); + test_range(last_restore_point_slot - 1, split_slot); + test_range(Slot::new(0), head_state.slot()); +} + +#[test] +fn block_replay_with_inaccurate_state_roots() { + let num_blocks_produced = E::slots_per_epoch() * 3 + 31; + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let chain = &harness.chain; + + harness.extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + // Slot must not be 0 mod 32 or else no blocks will be replayed. 
+ let (mut head_state, head_root) = harness.get_current_state_and_root(); + assert_ne!(head_state.slot() % 32, 0); + + let mut fast_head_state = store + .get_inconsistent_state_for_attestation_verification_only( + &head_root, + Some(head_state.slot()), + ) + .unwrap() + .unwrap(); + assert_eq!(head_state.validators(), fast_head_state.validators()); + + head_state.build_all_committee_caches(&chain.spec).unwrap(); + fast_head_state + .build_all_committee_caches(&chain.spec) + .unwrap(); + + assert_eq!( + head_state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .unwrap(), + fast_head_state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .unwrap() + ); +} + +#[test] +fn block_replayer_hooks() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let chain = &harness.chain; + + let block_slots = vec![1, 3, 5, 10, 11, 12, 13, 14, 31, 32, 33] + .into_iter() + .map(Slot::new) + .collect::>(); + let max_slot = *block_slots.last().unwrap(); + let all_slots = (0..=max_slot.as_u64()).map(Slot::new).collect::>(); + + let (state, state_root) = harness.get_current_state_and_root(); + let all_validators = harness.get_all_validators(); + let (_, _, end_block_root, mut end_state) = harness.add_attested_blocks_at_slots( + state.clone(), + state_root, + &block_slots, + &all_validators, + ); + + let blocks = store + .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) + .unwrap(); + + let mut pre_slots = vec![]; + let mut post_slots = vec![]; + let mut pre_block_slots = vec![]; + let mut post_block_slots = vec![]; + + let mut replay_state = BlockReplayer::::new(state, &chain.spec) + .pre_slot_hook(Box::new(|state| { + pre_slots.push(state.slot()); + Ok(()) + })) + .post_slot_hook(Box::new(|state, epoch_summary, is_skip_slot| { + if is_skip_slot { + assert!(!block_slots.contains(&state.slot())); + } else { + 
assert!(block_slots.contains(&state.slot())); + } + if state.slot() % E::slots_per_epoch() == 0 { + assert!(epoch_summary.is_some()); + } + post_slots.push(state.slot()); + Ok(()) + })) + .pre_block_hook(Box::new(|state, block| { + assert_eq!(state.slot(), block.slot()); + pre_block_slots.push(block.slot()); + Ok(()) + })) + .post_block_hook(Box::new(|state, block| { + assert_eq!(state.slot(), block.slot()); + post_block_slots.push(block.slot()); + Ok(()) + })) + .apply_blocks(blocks, None) + .unwrap() + .into_state(); + + // All but last slot seen by pre-slot hook + assert_eq!(&pre_slots, all_slots.split_last().unwrap().1); + // All but 0th slot seen by post-slot hook + assert_eq!(&post_slots, all_slots.split_first().unwrap().1); + // All blocks seen by both hooks + assert_eq!(pre_block_slots, block_slots); + assert_eq!(post_block_slots, block_slots); + + // States match. + end_state.drop_all_caches().unwrap(); + replay_state.drop_all_caches().unwrap(); + assert_eq!(end_state, replay_state); +} + #[test] fn delete_blocks_and_states() { let db_path = tempdir().unwrap(); @@ -430,7 +616,7 @@ fn delete_blocks_and_states() { // Delete faulty fork // Attempting to load those states should find them unavailable for (state_root, slot) in - StateRootsIterator::new(store.clone(), &faulty_head_state).map(Result::unwrap) + StateRootsIterator::new(&store, &faulty_head_state).map(Result::unwrap) { if slot <= unforked_blocks { break; @@ -441,7 +627,7 @@ fn delete_blocks_and_states() { // Double-deleting should also be OK (deleting non-existent things is fine) for (state_root, slot) in - StateRootsIterator::new(store.clone(), &faulty_head_state).map(Result::unwrap) + StateRootsIterator::new(&store, &faulty_head_state).map(Result::unwrap) { if slot <= unforked_blocks { break; @@ -451,7 +637,7 @@ fn delete_blocks_and_states() { // Deleting the blocks from the fork should remove them completely for (block_root, slot) in - BlockRootsIterator::new(store.clone(), 
&faulty_head_state).map(Result::unwrap) + BlockRootsIterator::new(&store, &faulty_head_state).map(Result::unwrap) { if slot <= unforked_blocks + 1 { break; @@ -2223,9 +2409,10 @@ fn assert_chains_pretty_much_the_same(a: &BeaconChain, b ); let slot = a.slot().unwrap(); + let spec = T::EthSpec::default_spec(); assert!( - a.fork_choice.write().get_head(slot).unwrap() - == b.fork_choice.write().get_head(slot).unwrap(), + a.fork_choice.write().get_head(slot, &spec).unwrap() + == b.fork_choice.write().get_head(slot, &spec).unwrap(), "fork_choice heads should be equal" ); } diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 7326a02f46..2596ff18c1 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -1,11 +1,9 @@ #![cfg(not(debug_assertions))] -#[macro_use] -extern crate lazy_static; - use beacon_chain::sync_committee_verification::Error as SyncCommitteeError; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; use int_to_bytes::int_to_bytes32; +use lazy_static::lazy_static; use safe_arith::SafeArith; use store::{SignedContributionAndProof, SyncCommitteeMessage}; use tree_hash::TreeHash; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 5b85da5bf8..4f2d3904e5 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -1,8 +1,5 @@ #![cfg(not(debug_assertions))] -#[macro_use] -extern crate lazy_static; - use beacon_chain::{ attestation_verification::Error as AttnError, test_utils::{ @@ -11,6 +8,7 @@ use beacon_chain::{ }, StateSkipConfig, WhenSlotSkipped, }; +use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{ per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError, diff 
--git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 165904a4c9..acb8376dbd 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -31,10 +31,11 @@ task_executor = { path = "../../common/task_executor" } environment = { path = "../../lighthouse/environment" } lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -time = "0.3.3" +time = "0.3.5" directory = {path = "../../common/directory"} http_api = { path = "../http_api" } http_metrics = { path = "../http_metrics" } slasher = { path = "../../slasher" } slasher_service = { path = "../../slasher/service" } monitoring_api = {path = "../../common/monitoring_api"} +execution_layer = { path = "../execution_layer" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 6661fa2290..550d89125e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -16,13 +16,14 @@ use eth2::{ types::{BlockId, StateId}, BeaconNodeHttpClient, Error as ApiError, Timeouts, }; -use genesis::{interop_genesis_state, Eth1GenesisService}; -use lighthouse_network::NetworkGlobals; +use execution_layer::ExecutionLayer; +use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH}; +use lighthouse_network::{open_metrics_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkMessage, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; -use slog::{debug, info, warn}; +use slog::{debug, info, warn, Logger}; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -30,7 +31,8 @@ use std::time::Duration; use timer::spawn_timer; use tokio::sync::{mpsc::UnboundedSender, oneshot}; use types::{ - test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, SignedBeaconBlock, + test_utils::generate_deterministic_keypairs, BeaconState, 
ChainSpec, EthSpec, Hash256, + SignedBeaconBlock, }; /// Interval between polling the eth1 node for genesis information. @@ -63,6 +65,7 @@ pub struct ClientBuilder { eth1_service: Option, network_globals: Option>>, network_send: Option>>, + gossipsub_registry: Option, db_path: Option, freezer_db_path: Option, http_api_config: http_api::Config, @@ -94,6 +97,7 @@ where eth1_service: None, network_globals: None, network_send: None, + gossipsub_registry: None, db_path: None, freezer_db_path: None, http_api_config: <_>::default(), @@ -131,7 +135,6 @@ where let chain_spec = self.chain_spec.clone(); let runtime_context = self.runtime_context.clone(); let eth_spec_instance = self.eth_spec_instance.clone(); - let disabled_forks = config.disabled_forks.clone(); let chain_config = config.chain.clone(); let graffiti = config.graffiti; @@ -146,14 +149,28 @@ where None }; + let execution_layer = if let Some(execution_endpoints) = config.execution_endpoints { + let context = runtime_context.service_context("exec".into()); + let execution_layer = ExecutionLayer::from_urls( + execution_endpoints, + config.suggested_fee_recipient, + context.executor.clone(), + context.log().clone(), + ) + .map_err(|e| format!("unable to start execution layer endpoints: {:?}", e))?; + Some(execution_layer) + } else { + None + }; + let builder = BeaconChainBuilder::new(eth_spec_instance) .logger(context.log().clone()) .store(store) .custom_spec(spec.clone()) .chain_config(chain_config) - .disabled_forks(disabled_forks) .graffiti(graffiti) .event_handler(event_handler) + .execution_layer(execution_layer) .monitor_validators( config.validator_monitor_auto, config.validator_monitor_pubkeys.clone(), @@ -204,7 +221,13 @@ where genesis_time, } => { let keypairs = generate_deterministic_keypairs(validator_count); - let genesis_state = interop_genesis_state(&keypairs, genesis_time, &spec)?; + let genesis_state = interop_genesis_state( + &keypairs, + genesis_time, + 
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + &spec, + )?; builder.genesis_state(genesis_state).map(|v| (v, None))? } ClientGenesis::SszBytes { @@ -425,13 +448,27 @@ where .ok_or("network requires a runtime_context")? .clone(); - let (network_globals, network_send) = - NetworkService::start(beacon_chain, config, context.executor) - .await - .map_err(|e| format!("Failed to start network: {:?}", e))?; + // If gossipsub metrics are required we build a registry to record them + let mut gossipsub_registry = if config.metrics_enabled { + Some(Registry::default()) + } else { + None + }; + + let (network_globals, network_send) = NetworkService::start( + beacon_chain, + config, + context.executor, + gossipsub_registry + .as_mut() + .map(|registry| registry.sub_registry_with_prefix("gossipsub")), + ) + .await + .map_err(|e| format!("Failed to start network: {:?}", e))?; self.network_globals = Some(network_globals); self.network_send = Some(network_send); + self.gossipsub_registry = gossipsub_registry; Ok(self) } @@ -539,13 +576,13 @@ where Ok(self) } - /// Consumers the builder, returning a `Client` if all necessary components have been + /// Consumes the builder, returning a `Client` if all necessary components have been /// specified. /// /// If type inference errors are being raised, see the comment on the definition of `Self`. 
#[allow(clippy::type_complexity)] pub fn build( - self, + mut self, ) -> Result>, String> { let runtime_context = self @@ -592,6 +629,7 @@ where chain: self.beacon_chain.clone(), db_path: self.db_path.clone(), freezer_db_path: self.freezer_db_path.clone(), + gossipsub_registry: self.gossipsub_registry.take().map(std::sync::Mutex::new), log: log.clone(), }); @@ -616,8 +654,53 @@ where if let Some(beacon_chain) = self.beacon_chain.as_ref() { let state_advance_context = runtime_context.service_context("state_advance".into()); - let log = state_advance_context.log().clone(); - spawn_state_advance_timer(state_advance_context.executor, beacon_chain.clone(), log); + let state_advance_log = state_advance_context.log().clone(); + spawn_state_advance_timer( + state_advance_context.executor, + beacon_chain.clone(), + state_advance_log, + ); + + if let Some(execution_layer) = beacon_chain.execution_layer.as_ref() { + let store = beacon_chain.store.clone(); + let inner_execution_layer = execution_layer.clone(); + + let head = beacon_chain + .head_info() + .map_err(|e| format!("Unable to read beacon chain head: {:?}", e))?; + + // Issue the head to the execution engine on startup. This ensures it can start + // syncing. + if let Some(block_hash) = head.execution_payload_block_hash { + runtime_context.executor.spawn( + async move { + let result = BeaconChain::< + Witness, + >::update_execution_engine_forkchoice( + inner_execution_layer, + store, + head.finalized_checkpoint.root, + block_hash, + ) + .await; + + // No need to exit early if setting the head fails. It will be set again if/when the + // node comes online. + if let Err(e) = result { + warn!( + log, + "Failed to update head on execution engines"; + "error" => ?e + ); + } + }, + "el_fork_choice_update", + ); + } + + // Spawn a routine that tracks the status of the execution engines. 
+ execution_layer.spawn_watchdog_routine(beacon_chain.slot_clock.clone()); + } } Ok(Client { @@ -680,6 +763,7 @@ where hot_path: &Path, cold_path: &Path, config: StoreConfig, + log: Logger, ) -> Result { let context = self .runtime_context @@ -695,7 +779,7 @@ where self.freezer_db_path = Some(cold_path.into()); let schema_upgrade = |db, from, to| { - migrate_schema::>(db, datadir, from, to) + migrate_schema::>(db, datadir, from, to, log) }; let store = HotColdDB::open( diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 40e13898b9..9768962260 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -4,7 +4,7 @@ use sensitive_url::SensitiveUrl; use serde_derive::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; -use types::{Graffiti, PublicKeyBytes}; +use types::{Address, Graffiti, PublicKeyBytes}; /// Default directory name for the freezer database under the top-level data dir. const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; @@ -58,8 +58,6 @@ pub struct Config { /// This is the method used for the 2019 client interop in Canada. pub dummy_eth1_backend: bool, pub sync_eth1_chain: bool, - /// A list of hard-coded forks that will be disabled. - pub disabled_forks: Vec, /// Graffiti to be inserted everytime we create a block. pub graffiti: Graffiti, /// When true, automatically monitor validators using the HTTP API. @@ -74,6 +72,8 @@ pub struct Config { pub network: network::NetworkConfig, pub chain: beacon_chain::ChainConfig, pub eth1: eth1::Config, + pub execution_endpoints: Option>, + pub suggested_fee_recipient: Option
, pub http_api: http_api::Config, pub http_metrics: http_metrics::Config, pub monitoring_api: Option, @@ -94,7 +94,8 @@ impl Default for Config { dummy_eth1_backend: false, sync_eth1_chain: false, eth1: <_>::default(), - disabled_forks: Vec::new(), + execution_endpoints: None, + suggested_fee_recipient: None, graffiti: Graffiti::default(), http_api: <_>::default(), http_metrics: <_>::default(), @@ -108,58 +109,83 @@ impl Default for Config { impl Config { /// Get the database path without initialising it. - pub fn get_db_path(&self) -> Option { - self.get_data_dir() - .map(|data_dir| data_dir.join(&self.db_name)) + pub fn get_db_path(&self) -> PathBuf { + self.get_data_dir().join(&self.db_name) } /// Get the database path, creating it if necessary. pub fn create_db_path(&self) -> Result { - let db_path = self - .get_db_path() - .ok_or("Unable to locate user home directory")?; - ensure_dir_exists(db_path) + ensure_dir_exists(self.get_db_path()) } /// Fetch default path to use for the freezer database. - fn default_freezer_db_path(&self) -> Option { - self.get_data_dir() - .map(|data_dir| data_dir.join(DEFAULT_FREEZER_DB_DIR)) + fn default_freezer_db_path(&self) -> PathBuf { + self.get_data_dir().join(DEFAULT_FREEZER_DB_DIR) } /// Returns the path to which the client may initialize the on-disk freezer database. /// /// Will attempt to use the user-supplied path from e.g. the CLI, or will default /// to a directory in the data_dir if no path is provided. - pub fn get_freezer_db_path(&self) -> Option { + pub fn get_freezer_db_path(&self) -> PathBuf { self.freezer_db_path .clone() - .or_else(|| self.default_freezer_db_path()) + .unwrap_or_else(|| self.default_freezer_db_path()) } /// Get the freezer DB path, creating it if necessary. 
pub fn create_freezer_db_path(&self) -> Result { - let freezer_db_path = self - .get_freezer_db_path() - .ok_or("Unable to locate user home directory")?; - ensure_dir_exists(freezer_db_path) + ensure_dir_exists(self.get_freezer_db_path()) + } + + /// Returns the "modern" path to the data_dir. + /// + /// See `Self::get_data_dir` documentation for more info. + fn get_modern_data_dir(&self) -> PathBuf { + self.data_dir.clone() + } + + /// Returns the "legacy" path to the data_dir. + /// + /// See `Self::get_data_dir` documentation for more info. + pub fn get_existing_legacy_data_dir(&self) -> Option { + dirs::home_dir() + .map(|home_dir| home_dir.join(&self.data_dir)) + // Return `None` if the directory does not exists. + .filter(|dir| dir.exists()) + // Return `None` if the legacy directory is identical to the modern. + .filter(|dir| *dir != self.get_modern_data_dir()) } /// Returns the core path for the client. /// /// Will not create any directories. - pub fn get_data_dir(&self) -> Option { - dirs::home_dir().map(|home_dir| home_dir.join(&self.data_dir)) + /// + /// ## Legacy Info + /// + /// Legacy versions of Lighthouse did not properly handle relative paths for `--datadir`. + /// + /// For backwards compatibility, we still compute the legacy path and check if it exists. If + /// it does exist, we use that directory rather than the modern path. + /// + /// For more information, see: + /// + /// https://github.com/sigp/lighthouse/pull/2843 + fn get_data_dir(&self) -> PathBuf { + let existing_legacy_dir = self.get_existing_legacy_data_dir(); + + if let Some(legacy_dir) = existing_legacy_dir { + legacy_dir + } else { + self.get_modern_data_dir() + } } /// Returns the core path for the client. /// /// Creates the directory if it does not exist. 
pub fn create_data_dir(&self) -> Result { - let path = self - .get_data_dir() - .ok_or("Unable to locate user home directory")?; - ensure_dir_exists(path) + ensure_dir_exists(self.get_data_dir()) } } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 380af25687..22c3bfcb3a 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,8 +1,8 @@ use crate::metrics; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes, HeadSafetyStatus}; use lighthouse_network::{types::SyncState, NetworkGlobals}; use parking_lot::Mutex; -use slog::{debug, error, info, warn, Logger}; +use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -263,10 +263,43 @@ pub fn spawn_notifier( } else { head_root.to_string() }; + + let block_hash = match beacon_chain.head_safety_status() { + Ok(HeadSafetyStatus::Safe(hash_opt)) => hash_opt + .map(|hash| format!("{} (verified)", hash)) + .unwrap_or_else(|| "n/a".to_string()), + Ok(HeadSafetyStatus::Unsafe(block_hash)) => { + warn!( + log, + "Head execution payload is unverified"; + "execution_block_hash" => ?block_hash, + ); + format!("{} (unverified)", block_hash) + } + Ok(HeadSafetyStatus::Invalid(block_hash)) => { + crit!( + log, + "Head execution payload is invalid"; + "msg" => "this scenario may be unrecoverable", + "execution_block_hash" => ?block_hash, + ); + format!("{} (invalid)", block_hash) + } + Err(e) => { + error!( + log, + "Failed to read head safety status"; + "error" => ?e + ); + "n/a".to_string() + } + }; + info!( log, "Synced"; "peers" => peer_count_pretty(connected_peer_count), + "exec_hash" => block_hash, "finalized_root" => format!("{}", finalized_root), "finalized_epoch" => finalized_epoch, "epoch" => current_epoch, @@ -302,6 +335,11 @@ fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger if let Ok(head_info) = 
beacon_chain.head_info() { // Perform some logging about the eth1 chain if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { + // No need to do logging if using the dummy backend. + if eth1_chain.is_dummy_backend() { + return; + } + if let Some(status) = eth1_chain.sync_status(head_info.genesis_time, current_slot_opt, &beacon_chain.spec) { diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 7103d1b487..0b0c2ea168 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" eth1_test_rig = { path = "../../testing/eth1_test_rig" } toml = "0.5.6" web3 = { version = "0.17.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } -sloggers = "2.0.2" +sloggers = { version = "2.1.1", features = ["json"] } environment = { path = "../../lighthouse/environment" } [dependencies] @@ -19,9 +19,9 @@ serde = { version = "1.0.116", features = ["derive"] } hex = "0.4.2" types = { path = "../../consensus/types"} merkle_proof = { path = "../../consensus/merkle_proof"} -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" -tree_hash = "0.4.0" +tree_hash = "0.4.1" parking_lot = "0.11.0" slog = "2.5.2" tokio = { version = "1.14.0", features = ["full"] } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml new file mode 100644 index 0000000000..c166024c06 --- /dev/null +++ b/beacon_node/execution_layer/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "execution_layer" +version = "0.1.0" +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +types = { path = "../../consensus/types"} +tokio = { version = "1.10.0", features = ["full"] } +async-trait = "0.1.51" +slog = "2.5.2" +futures = "0.3.7" +sensitive_url = { path = "../../common/sensitive_url" } +reqwest = { version = "0.11.0", features = ["json","stream"] } +eth2_serde_utils = "0.1.1" +serde_json = 
"1.0.58" +serde = { version = "1.0.116", features = ["derive"] } +eth1 = { path = "../eth1" } +warp = { git = "https://github.com/macladson/warp", rev ="dfa259e", features = ["tls"] } +environment = { path = "../../lighthouse/environment" } +bytes = "1.1.0" +task_executor = { path = "../../common/task_executor" } +hex = "0.4.2" +eth2_ssz_types = "0.2.2" +lru = "0.7.1" +exit-future = "0.2.0" +tree_hash = "0.4.1" +tree_hash_derive = { path = "../../consensus/tree_hash_derive"} +parking_lot = "0.11.0" +slot_clock = { path = "../../common/slot_clock" } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs new file mode 100644 index 0000000000..f9654a497b --- /dev/null +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -0,0 +1,122 @@ +use async_trait::async_trait; +use eth1::http::RpcError; +use serde::{Deserialize, Serialize}; + +pub const LATEST_TAG: &str = "latest"; + +use crate::engines::ForkChoiceState; +pub use types::{Address, EthSpec, ExecutionPayload, Hash256, Uint256}; + +pub mod http; +pub mod json_structures; + +pub type PayloadId = [u8; 8]; + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + BadResponse(String), + RequestFailed(String), + JsonRpc(RpcError), + Json(serde_json::Error), + ServerMessage { code: i64, message: String }, + Eip155Failure, + IsSyncing, + ExecutionBlockNotFound(Hash256), + ExecutionHeadBlockNotFound, + ParentHashEqualsBlockHash(Hash256), + PayloadIdUnavailable, +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: serde_json::Error) -> Self { + Error::Json(e) + } +} + +/// A generic interface for an execution engine API. 
+#[async_trait] +pub trait EngineApi { + async fn upcheck(&self) -> Result<(), Error>; + + async fn get_block_by_number<'a>( + &self, + block_by_number: BlockByNumberQuery<'a>, + ) -> Result, Error>; + + async fn get_block_by_hash<'a>( + &self, + block_hash: Hash256, + ) -> Result, Error>; + + async fn execute_payload_v1( + &self, + execution_payload: ExecutionPayload, + ) -> Result; + + async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> Result, Error>; + + async fn forkchoice_updated_v1( + &self, + forkchoice_state: ForkChoiceState, + payload_attributes: Option, + ) -> Result; +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum ExecutePayloadResponseStatus { + Valid, + Invalid, + Syncing, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct ExecutePayloadResponse { + pub status: ExecutePayloadResponseStatus, + pub latest_valid_hash: Option, + pub validation_error: Option, +} + +#[derive(Clone, Copy, Debug, PartialEq, Serialize)] +#[serde(untagged)] +pub enum BlockByNumberQuery<'a> { + Tag(&'a str), +} + +#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExecutionBlock { + #[serde(rename = "hash")] + pub block_hash: Hash256, + #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + pub block_number: u64, + pub parent_hash: Hash256, + pub total_difficulty: Uint256, +} + +#[derive(Clone, Copy, Debug)] +pub struct PayloadAttributes { + pub timestamp: u64, + pub random: Hash256, + pub suggested_fee_recipient: Address, +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum ForkchoiceUpdatedResponseStatus { + Success, + Syncing, +} +#[derive(Clone, Debug, PartialEq)] +pub struct ForkchoiceUpdatedResponse { + pub status: ForkchoiceUpdatedResponseStatus, + pub payload_id: Option, +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs new file mode 100644 index 0000000000..c7c60a9006 --- /dev/null +++ 
b/beacon_node/execution_layer/src/engine_api/http.rs @@ -0,0 +1,853 @@ +//! Contains an implementation of `EngineAPI` using the JSON-RPC API via HTTP. + +use super::*; +use crate::json_structures::*; +use async_trait::async_trait; +use eth1::http::EIP155_ERROR_STR; +use reqwest::header::CONTENT_TYPE; +use sensitive_url::SensitiveUrl; +use serde::de::DeserializeOwned; +use serde_json::json; +use std::time::Duration; +use types::EthSpec; + +pub use reqwest::Client; + +const STATIC_ID: u32 = 1; +pub const JSONRPC_VERSION: &str = "2.0"; + +pub const RETURN_FULL_TRANSACTION_OBJECTS: bool = false; + +pub const ETH_GET_BLOCK_BY_NUMBER: &str = "eth_getBlockByNumber"; +pub const ETH_GET_BLOCK_BY_NUMBER_TIMEOUT: Duration = Duration::from_secs(1); + +pub const ETH_GET_BLOCK_BY_HASH: &str = "eth_getBlockByHash"; +pub const ETH_GET_BLOCK_BY_HASH_TIMEOUT: Duration = Duration::from_secs(1); + +pub const ETH_SYNCING: &str = "eth_syncing"; +pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_millis(250); + +pub const ENGINE_EXECUTE_PAYLOAD_V1: &str = "engine_executePayloadV1"; +pub const ENGINE_EXECUTE_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); + +pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; +pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); + +pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; +pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_millis(500); + +pub struct HttpJsonRpc { + pub client: Client, + pub url: SensitiveUrl, +} + +impl HttpJsonRpc { + pub fn new(url: SensitiveUrl) -> Result { + Ok(Self { + client: Client::builder().build()?, + url, + }) + } + + pub async fn rpc_request( + &self, + method: &str, + params: serde_json::Value, + timeout: Duration, + ) -> Result { + let body = JsonRequestBody { + jsonrpc: JSONRPC_VERSION, + method, + params, + id: STATIC_ID, + }; + + let body: JsonResponseBody = self + .client + .post(self.url.full.clone()) + .timeout(timeout) + 
.header(CONTENT_TYPE, "application/json") + .json(&body) + .send() + .await? + .error_for_status()? + .json() + .await?; + + match (body.result, body.error) { + (result, None) => serde_json::from_value(result).map_err(Into::into), + (_, Some(error)) => { + if error.message.contains(EIP155_ERROR_STR) { + Err(Error::Eip155Failure) + } else { + Err(Error::ServerMessage { + code: error.code, + message: error.message, + }) + } + } + } + } +} + +#[async_trait] +impl EngineApi for HttpJsonRpc { + async fn upcheck(&self) -> Result<(), Error> { + let result: serde_json::Value = self + .rpc_request(ETH_SYNCING, json!([]), ETH_SYNCING_TIMEOUT) + .await?; + + /* + * TODO + * + * Check the network and chain ids. We omit this to save time for the merge f2f and since it + * also seems like it might get annoying during development. + */ + match result.as_bool() { + Some(false) => Ok(()), + _ => Err(Error::IsSyncing), + } + } + + async fn get_block_by_number<'a>( + &self, + query: BlockByNumberQuery<'a>, + ) -> Result, Error> { + let params = json!([query, RETURN_FULL_TRANSACTION_OBJECTS]); + + self.rpc_request( + ETH_GET_BLOCK_BY_NUMBER, + params, + ETH_GET_BLOCK_BY_NUMBER_TIMEOUT, + ) + .await + } + + async fn get_block_by_hash<'a>( + &self, + block_hash: Hash256, + ) -> Result, Error> { + let params = json!([block_hash, RETURN_FULL_TRANSACTION_OBJECTS]); + + self.rpc_request(ETH_GET_BLOCK_BY_HASH, params, ETH_GET_BLOCK_BY_HASH_TIMEOUT) + .await + } + + async fn execute_payload_v1( + &self, + execution_payload: ExecutionPayload, + ) -> Result { + let params = json!([JsonExecutionPayloadV1::from(execution_payload)]); + + let response: JsonExecutePayloadV1Response = self + .rpc_request( + ENGINE_EXECUTE_PAYLOAD_V1, + params, + ENGINE_EXECUTE_PAYLOAD_TIMEOUT, + ) + .await?; + + Ok(response.into()) + } + + async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + let response: 
JsonExecutionPayloadV1 = self + .rpc_request(ENGINE_GET_PAYLOAD_V1, params, ENGINE_GET_PAYLOAD_TIMEOUT) + .await?; + + Ok(response.into()) + } + + async fn forkchoice_updated_v1( + &self, + forkchoice_state: ForkChoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkChoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributesV1::from) + ]); + + let response: JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V1, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT, + ) + .await?; + + Ok(response.into()) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::test_utils::MockServer; + use std::future::Future; + use std::str::FromStr; + use std::sync::Arc; + use types::{MainnetEthSpec, Transaction, Unsigned, VariableList}; + + struct Tester { + server: MockServer, + rpc_client: Arc, + echo_client: Arc, + } + + impl Tester { + pub fn new() -> Self { + let server = MockServer::unit_testing(); + + let rpc_url = SensitiveUrl::parse(&server.url()).unwrap(); + let rpc_client = Arc::new(HttpJsonRpc::new(rpc_url).unwrap()); + + let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); + let echo_client = Arc::new(HttpJsonRpc::new(echo_url).unwrap()); + + Self { + server, + rpc_client, + echo_client, + } + } + + pub async fn assert_request_equals( + self, + request_func: R, + expected_json: serde_json::Value, + ) -> Self + where + R: Fn(Arc) -> F, + F: Future, + { + request_func(self.echo_client.clone()).await; + let request_bytes = self.server.last_echo_request(); + let request_json: serde_json::Value = + serde_json::from_slice(&request_bytes).expect("request was not valid json"); + if request_json != expected_json { + panic!( + "json mismatch!\n\nobserved: {}\n\nexpected: {}\n\n", + request_json, expected_json, + ) + } + self + } + + pub async fn with_preloaded_responses( + self, + preloaded_responses: Vec, + request_func: R, + ) -> Self + where + R: Fn(Arc) 
-> F, + F: Future, + { + for response in preloaded_responses { + self.server.push_preloaded_response(response); + } + request_func(self.rpc_client.clone()).await; + self + } + } + + const HASH_00: &str = "0x0000000000000000000000000000000000000000000000000000000000000000"; + const HASH_01: &str = "0x0101010101010101010101010101010101010101010101010101010101010101"; + + const ADDRESS_00: &str = "0x0000000000000000000000000000000000000000"; + const ADDRESS_01: &str = "0x0101010101010101010101010101010101010101"; + + const JSON_NULL: serde_json::Value = serde_json::Value::Null; + const LOGS_BLOOM_00: &str = "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + const LOGS_BLOOM_01: &str = "0x01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101"; + + fn encode_transactions( + transactions: VariableList< + Transaction, + E::MaxTransactionsPerPayload, + >, + ) -> Result { + let ep: JsonExecutionPayloadV1 = JsonExecutionPayloadV1 { + transactions, + ..<_>::default() + }; + let json = serde_json::to_value(&ep)?; + Ok(json.get("transactions").unwrap().clone()) + } + 
+ fn decode_transactions( + transactions: serde_json::Value, + ) -> Result< + VariableList, E::MaxTransactionsPerPayload>, + serde_json::Error, + > { + let mut json = json!({ + "parentHash": HASH_00, + "feeRecipient": ADDRESS_01, + "stateRoot": HASH_01, + "receiptsRoot": HASH_00, + "logsBloom": LOGS_BLOOM_01, + "random": HASH_01, + "blockNumber": "0x0", + "gasLimit": "0x1", + "gasUsed": "0x2", + "timestamp": "0x2a", + "extraData": "0x", + "baseFeePerGas": "0x1", + "blockHash": HASH_01, + }); + // Take advantage of the fact that we own `transactions` and don't need to reserialize it. + json.as_object_mut() + .unwrap() + .insert("transactions".into(), transactions); + let ep: JsonExecutionPayloadV1 = serde_json::from_value(json)?; + Ok(ep.transactions) + } + + fn assert_transactions_serde( + name: &str, + as_obj: VariableList, E::MaxTransactionsPerPayload>, + as_json: serde_json::Value, + ) { + assert_eq!( + encode_transactions::(as_obj.clone()).unwrap(), + as_json, + "encoding for {}", + name + ); + assert_eq!( + decode_transactions::(as_json).unwrap(), + as_obj, + "decoding for {}", + name + ); + } + + /// Example: if `spec == &[1, 1]`, then two one-byte transactions will be created. 
+ fn generate_transactions( + spec: &[usize], + ) -> VariableList, E::MaxTransactionsPerPayload> { + let mut txs = VariableList::default(); + + for &num_bytes in spec { + let mut tx = VariableList::default(); + for _ in 0..num_bytes { + tx.push(0).unwrap(); + } + txs.push(tx).unwrap(); + } + + txs + } + + #[test] + fn transaction_serde() { + assert_transactions_serde::( + "empty", + generate_transactions::(&[]), + json!([]), + ); + assert_transactions_serde::( + "one empty tx", + generate_transactions::(&[0]), + json!(["0x"]), + ); + assert_transactions_serde::( + "two empty txs", + generate_transactions::(&[0, 0]), + json!(["0x", "0x"]), + ); + assert_transactions_serde::( + "one one-byte tx", + generate_transactions::(&[1]), + json!(["0x00"]), + ); + assert_transactions_serde::( + "two one-byte txs", + generate_transactions::(&[1, 1]), + json!(["0x00", "0x00"]), + ); + assert_transactions_serde::( + "mixed bag", + generate_transactions::(&[0, 1, 3, 0]), + json!(["0x", "0x00", "0x000000", "0x"]), + ); + + /* + * Check for too many transactions + */ + + let num_max_txs = ::MaxTransactionsPerPayload::to_usize(); + let max_txs = (0..num_max_txs).map(|_| "0x00").collect::>(); + let too_many_txs = (0..=num_max_txs).map(|_| "0x00").collect::>(); + + decode_transactions::(serde_json::to_value(max_txs).unwrap()).unwrap(); + assert!( + decode_transactions::(serde_json::to_value(too_many_txs).unwrap()) + .is_err() + ); + } + + #[tokio::test] + async fn get_block_by_number_request() { + Tester::new() + .assert_request_equals( + |client| async move { + let _ = client + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) + .await; + }, + json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "method": ETH_GET_BLOCK_BY_NUMBER, + "params": ["latest", false] + }), + ) + .await; + } + + #[tokio::test] + async fn get_block_by_hash_request() { + Tester::new() + .assert_request_equals( + |client| async move { + let _ = 
client.get_block_by_hash(Hash256::repeat_byte(1)).await; + }, + json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "method": ETH_GET_BLOCK_BY_HASH, + "params": [HASH_01, false] + }), + ) + .await; + } + + #[tokio::test] + async fn forkchoice_updated_v1_with_payload_attributes_request() { + Tester::new() + .assert_request_equals( + |client| async move { + let _ = client + .forkchoice_updated_v1( + ForkChoiceState { + head_block_hash: Hash256::repeat_byte(1), + safe_block_hash: Hash256::repeat_byte(1), + finalized_block_hash: Hash256::zero(), + }, + Some(PayloadAttributes { + timestamp: 5, + random: Hash256::zero(), + suggested_fee_recipient: Address::repeat_byte(0), + }), + ) + .await; + }, + json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "method": ENGINE_FORKCHOICE_UPDATED_V1, + "params": [{ + "headBlockHash": HASH_01, + "safeBlockHash": HASH_01, + "finalizedBlockHash": HASH_00, + }, + { + "timestamp":"0x5", + "random": HASH_00, + "suggestedFeeRecipient": ADDRESS_00 + }] + }), + ) + .await; + } + + #[tokio::test] + async fn get_payload_v1_request() { + Tester::new() + .assert_request_equals( + |client| async move { + let _ = client.get_payload_v1::([42; 8]).await; + }, + json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "method": ENGINE_GET_PAYLOAD_V1, + "params": ["0x2a2a2a2a2a2a2a2a"] + }), + ) + .await; + } + + #[tokio::test] + async fn execute_payload_v1_request() { + Tester::new() + .assert_request_equals( + |client| async move { + let _ = client + .execute_payload_v1::(ExecutionPayload { + parent_hash: Hash256::repeat_byte(0), + fee_recipient: Address::repeat_byte(1), + state_root: Hash256::repeat_byte(1), + receipt_root: Hash256::repeat_byte(0), + logs_bloom: vec![1; 256].into(), + random: Hash256::repeat_byte(1), + block_number: 0, + gas_limit: 1, + gas_used: 2, + timestamp: 42, + extra_data: vec![].into(), + base_fee_per_gas: Uint256::from(1), + block_hash: Hash256::repeat_byte(1), + transactions: vec![].into(), + }) + .await; 
+ }, + json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "method": ENGINE_EXECUTE_PAYLOAD_V1, + "params": [{ + "parentHash": HASH_00, + "feeRecipient": ADDRESS_01, + "stateRoot": HASH_01, + "receiptsRoot": HASH_00, + "logsBloom": LOGS_BLOOM_01, + "random": HASH_01, + "blockNumber": "0x0", + "gasLimit": "0x1", + "gasUsed": "0x2", + "timestamp": "0x2a", + "extraData": "0x", + "baseFeePerGas": "0x1", + "blockHash": HASH_01, + "transactions": [], + }] + }), + ) + .await; + } + + #[tokio::test] + async fn forkchoice_updated_v1_request() { + Tester::new() + .assert_request_equals( + |client| async move { + let _ = client + .forkchoice_updated_v1( + ForkChoiceState { + head_block_hash: Hash256::repeat_byte(0), + safe_block_hash: Hash256::repeat_byte(0), + finalized_block_hash: Hash256::repeat_byte(1), + }, + None, + ) + .await; + }, + json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "method": ENGINE_FORKCHOICE_UPDATED_V1, + "params": [{ + "headBlockHash": HASH_00, + "safeBlockHash": HASH_00, + "finalizedBlockHash": HASH_01, + }, JSON_NULL] + }), + ) + .await; + } + + fn str_to_payload_id(s: &str) -> PayloadId { + serde_json::from_str::(&format!("\"{}\"", s)) + .unwrap() + .into() + } + + #[test] + fn str_payload_id() { + assert_eq!( + str_to_payload_id("0x002a2a2a2a2a2a01"), + [0, 42, 42, 42, 42, 42, 42, 1] + ); + } + + /// Test vectors provided by Geth: + /// + /// https://notes.ethereum.org/@9AeMAlpyQYaAAyuj47BzRw/rkwW3ceVY + /// + /// The `id` field has been modified on these vectors to match the one we use. 
+ #[tokio::test] + async fn geth_test_vectors() { + Tester::new() + .assert_request_equals( + // engine_forkchoiceUpdatedV1 (prepare payload) REQUEST validation + |client| async move { + let _ = client + .forkchoice_updated_v1( + ForkChoiceState { + head_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + safe_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + finalized_block_hash: Hash256::zero(), + }, + Some(PayloadAttributes { + timestamp: 5, + random: Hash256::zero(), + suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), + }) + ) + .await; + }, + json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "method": ENGINE_FORKCHOICE_UPDATED_V1, + "params": [{ + "headBlockHash": "0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a", + "safeBlockHash": "0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a", + "finalizedBlockHash": HASH_00, + }, + { + "timestamp":"0x5", + "random": HASH_00, + "suggestedFeeRecipient":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" + }] + }) + ) + .await + .with_preloaded_responses( + // engine_forkchoiceUpdatedV1 (prepare payload) RESPONSE validation + vec![json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "result": { + "status": "SUCCESS", + "payloadId": "0xa247243752eb10b4" + } + })], + |client| async move { + let response = client + .forkchoice_updated_v1( + ForkChoiceState { + head_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + safe_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + finalized_block_hash: Hash256::zero(), + }, + Some(PayloadAttributes { + timestamp: 5, + random: Hash256::zero(), + suggested_fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), + }) + ) + 
.await + .unwrap(); + assert_eq!(response, ForkchoiceUpdatedResponse { + status: ForkchoiceUpdatedResponseStatus::Success, + payload_id: + Some(str_to_payload_id("0xa247243752eb10b4")), + }); + }, + ) + .await + .assert_request_equals( + // engine_getPayloadV1 REQUEST validation + |client| async move { + let _ = client + .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) + .await; + }, + json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "method": ENGINE_GET_PAYLOAD_V1, + "params": ["0xa247243752eb10b4"] + }) + ) + .await + .with_preloaded_responses( + // engine_getPayloadV1 RESPONSE validation + vec![json!({ + "jsonrpc":JSONRPC_VERSION, + "id":STATIC_ID, + "result":{ + "parentHash":"0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a", + "feeRecipient":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "stateRoot":"0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45", + "receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsBloom": LOGS_BLOOM_00, + "random": HASH_00, + "blockNumber":"0x1", + "gasLimit":"0x1c9c380", + "gasUsed":"0x0", + "timestamp":"0x5", + "extraData":"0x", + "baseFeePerGas":"0x7", + "blockHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858", + "transactions":[] + } + })], + |client| async move { + let payload = client + .get_payload_v1::(str_to_payload_id("0xa247243752eb10b4")) + .await + .unwrap(); + + let expected = ExecutionPayload { + parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), + state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), + receipt_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), + logs_bloom: vec![0; 256].into(), + random: Hash256::zero(), + block_number: 1, + 
gas_limit: u64::from_str_radix("1c9c380",16).unwrap(), + gas_used: 0, + timestamp: 5, + extra_data: vec![].into(), + base_fee_per_gas: Uint256::from(7), + block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + transactions: vec![].into(), + }; + + assert_eq!(payload, expected); + }, + ) + .await + .assert_request_equals( + // engine_executePayloadV1 REQUEST validation + |client| async move { + let _ = client + .execute_payload_v1::(ExecutionPayload { + parent_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), + state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), + receipt_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), + logs_bloom: vec![0; 256].into(), + random: Hash256::zero(), + block_number: 1, + gas_limit: u64::from_str_radix("1c9c380",16).unwrap(), + gas_used: 0, + timestamp: 5, + extra_data: vec![].into(), + base_fee_per_gas: Uint256::from(7), + block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + transactions: vec![].into(), + }) + .await; + }, + json!({ + "id": STATIC_ID, + "jsonrpc": JSONRPC_VERSION, + "method": ENGINE_EXECUTE_PAYLOAD_V1, + "params": [{ + "parentHash":"0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a", + "feeRecipient":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "stateRoot":"0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45", + "receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsBloom": LOGS_BLOOM_00, + "random": HASH_00, + "blockNumber":"0x1", + "gasLimit":"0x1c9c380", + "gasUsed":"0x0", + "timestamp":"0x5", + "extraData":"0x", + "baseFeePerGas":"0x7", + 
"blockHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858", + "transactions":[] + }], + }) + ) + .await + .with_preloaded_responses( + // engine_executePayloadV1 RESPONSE validation + vec![json!({ + "jsonrpc": JSONRPC_VERSION, + "id": STATIC_ID, + "result":{ + "status":"VALID", + "latestValidHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858" + } + })], + |client| async move { + let response = client + .execute_payload_v1::(ExecutionPayload::default()) + .await + .unwrap(); + + assert_eq!(response, + ExecutePayloadResponse { + status: ExecutePayloadResponseStatus::Valid, + latest_valid_hash: Some(Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap()), + validation_error: None + } + ); + }, + ) + .await + .assert_request_equals( + // engine_forkchoiceUpdatedV1 REQUEST validation + |client| async move { + let _ = client + .forkchoice_updated_v1( + ForkChoiceState { + head_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + safe_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + finalized_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + }, + None, + ) + .await; + }, + json!({ + "jsonrpc": JSONRPC_VERSION, + "method": ENGINE_FORKCHOICE_UPDATED_V1, + "params": [ + { + "headBlockHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858", + "safeBlockHash":"0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858", + "finalizedBlockHash":"0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a" + }, JSON_NULL], + "id": STATIC_ID + }) + ) + .await + .with_preloaded_responses( + // engine_forkchoiceUpdatedV1 RESPONSE validation + // + // Note: this test was modified to provide `null` rather than `0x`. The geth vectors + // are invalid. 
+ vec![json!({ + "jsonrpc": JSONRPC_VERSION, + "id": STATIC_ID, + "result": { + "status":"SUCCESS", + "payloadId": JSON_NULL, + } + })], + |client| async move { + let response = client + .forkchoice_updated_v1( + ForkChoiceState { + head_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + safe_block_hash: Hash256::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), + finalized_block_hash: Hash256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a").unwrap(), + }, + None, + ) + .await + .unwrap(); + assert_eq!(response, ForkchoiceUpdatedResponse { + status: ForkchoiceUpdatedResponseStatus::Success, + payload_id: None, + }); + }, + ) + .await; + } +} diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs new file mode 100644 index 0000000000..ae6d730fa5 --- /dev/null +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -0,0 +1,476 @@ +use super::*; +use serde::{Deserialize, Serialize}; +use types::{EthSpec, FixedVector, Transaction, Unsigned, VariableList}; + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonRequestBody<'a> { + pub jsonrpc: &'a str, + pub method: &'a str, + pub params: serde_json::Value, + pub id: u32, +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +pub struct JsonError { + pub code: i64, + pub message: String, +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonResponseBody { + pub jsonrpc: String, + #[serde(default)] + pub error: Option, + #[serde(default)] + pub result: serde_json::Value, + pub id: u32, +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct TransparentJsonPayloadId(#[serde(with = "eth2_serde_utils::bytes_8_hex")] pub PayloadId); + +impl From for 
TransparentJsonPayloadId { + fn from(id: PayloadId) -> Self { + Self(id) + } +} + +impl From for PayloadId { + fn from(wrapper: TransparentJsonPayloadId) -> Self { + wrapper.0 + } +} + +/// On the request, use a transparent wrapper. +pub type JsonPayloadIdRequest = TransparentJsonPayloadId; + +/// On the response, expect without the object wrapper (non-transparent). +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonPayloadIdResponse { + #[serde(with = "eth2_serde_utils::bytes_8_hex")] + pub payload_id: PayloadId, +} + +#[derive(Debug, PartialEq, Default, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase")] +pub struct JsonExecutionPayloadV1 { + pub parent_hash: Hash256, + pub fee_recipient: Address, + pub state_root: Hash256, + pub receipts_root: Hash256, + #[serde(with = "serde_logs_bloom")] + pub logs_bloom: FixedVector, + pub random: Hash256, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub block_number: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub gas_limit: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub gas_used: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub timestamp: u64, + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub extra_data: VariableList, + pub base_fee_per_gas: Uint256, + pub block_hash: Hash256, + #[serde(with = "serde_transactions")] + pub transactions: + VariableList, T::MaxTransactionsPerPayload>, +} + +impl From> for JsonExecutionPayloadV1 { + fn from(e: ExecutionPayload) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. 
+ let ExecutionPayload { + parent_hash, + fee_recipient, + state_root, + receipt_root, + logs_bloom, + random, + block_number, + gas_limit, + gas_used, + timestamp, + extra_data, + base_fee_per_gas, + block_hash, + transactions, + } = e; + + Self { + parent_hash, + fee_recipient, + state_root, + receipts_root: receipt_root, + logs_bloom, + random, + block_number, + gas_limit, + gas_used, + timestamp, + extra_data, + base_fee_per_gas, + block_hash, + transactions, + } + } +} + +impl From> for ExecutionPayload { + fn from(e: JsonExecutionPayloadV1) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let JsonExecutionPayloadV1 { + parent_hash, + fee_recipient, + state_root, + receipts_root, + logs_bloom, + random, + block_number, + gas_limit, + gas_used, + timestamp, + extra_data, + base_fee_per_gas, + block_hash, + transactions, + } = e; + + Self { + parent_hash, + fee_recipient, + state_root, + receipt_root: receipts_root, + logs_bloom, + random, + block_number, + gas_limit, + gas_used, + timestamp, + extra_data, + base_fee_per_gas, + block_hash, + transactions, + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonPayloadAttributesV1 { + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub timestamp: u64, + pub random: Hash256, + pub suggested_fee_recipient: Address, +} + +impl From for JsonPayloadAttributesV1 { + fn from(p: PayloadAttributes) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let PayloadAttributes { + timestamp, + random, + suggested_fee_recipient, + } = p; + + Self { + timestamp, + random, + suggested_fee_recipient, + } + } +} + +impl From for PayloadAttributes { + fn from(j: JsonPayloadAttributesV1) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. 
+ let JsonPayloadAttributesV1 { + timestamp, + random, + suggested_fee_recipient, + } = j; + + Self { + timestamp, + random, + suggested_fee_recipient, + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonForkChoiceStateV1 { + pub head_block_hash: Hash256, + pub safe_block_hash: Hash256, + pub finalized_block_hash: Hash256, +} + +impl From for JsonForkChoiceStateV1 { + fn from(f: ForkChoiceState) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let ForkChoiceState { + head_block_hash, + safe_block_hash, + finalized_block_hash, + } = f; + + Self { + head_block_hash, + safe_block_hash, + finalized_block_hash, + } + } +} + +impl From for ForkChoiceState { + fn from(j: JsonForkChoiceStateV1) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let JsonForkChoiceStateV1 { + head_block_hash, + safe_block_hash, + finalized_block_hash, + } = j; + + Self { + head_block_hash, + safe_block_hash, + finalized_block_hash, + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum JsonExecutePayloadV1ResponseStatus { + Valid, + Invalid, + Syncing, +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonExecutePayloadV1Response { + pub status: JsonExecutePayloadV1ResponseStatus, + pub latest_valid_hash: Option, + pub validation_error: Option, +} + +impl From for JsonExecutePayloadV1ResponseStatus { + fn from(e: ExecutePayloadResponseStatus) -> Self { + match e { + ExecutePayloadResponseStatus::Valid => JsonExecutePayloadV1ResponseStatus::Valid, + ExecutePayloadResponseStatus::Invalid => JsonExecutePayloadV1ResponseStatus::Invalid, + ExecutePayloadResponseStatus::Syncing => JsonExecutePayloadV1ResponseStatus::Syncing, + } + } +} +impl From for ExecutePayloadResponseStatus { + fn from(j: JsonExecutePayloadV1ResponseStatus) 
-> Self { + match j { + JsonExecutePayloadV1ResponseStatus::Valid => ExecutePayloadResponseStatus::Valid, + JsonExecutePayloadV1ResponseStatus::Invalid => ExecutePayloadResponseStatus::Invalid, + JsonExecutePayloadV1ResponseStatus::Syncing => ExecutePayloadResponseStatus::Syncing, + } + } +} + +impl From for JsonExecutePayloadV1Response { + fn from(e: ExecutePayloadResponse) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let ExecutePayloadResponse { + status, + latest_valid_hash, + validation_error, + } = e; + + Self { + status: status.into(), + latest_valid_hash, + validation_error, + } + } +} + +impl From for ExecutePayloadResponse { + fn from(j: JsonExecutePayloadV1Response) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let JsonExecutePayloadV1Response { + status, + latest_valid_hash, + validation_error, + } = j; + + Self { + status: status.into(), + latest_valid_hash, + validation_error, + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum JsonForkchoiceUpdatedV1ResponseStatus { + Success, + Syncing, +} +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonForkchoiceUpdatedV1Response { + pub status: JsonForkchoiceUpdatedV1ResponseStatus, + pub payload_id: Option, +} + +impl From for ForkchoiceUpdatedResponseStatus { + fn from(j: JsonForkchoiceUpdatedV1ResponseStatus) -> Self { + match j { + JsonForkchoiceUpdatedV1ResponseStatus::Success => { + ForkchoiceUpdatedResponseStatus::Success + } + JsonForkchoiceUpdatedV1ResponseStatus::Syncing => { + ForkchoiceUpdatedResponseStatus::Syncing + } + } + } +} +impl From for JsonForkchoiceUpdatedV1ResponseStatus { + fn from(f: ForkchoiceUpdatedResponseStatus) -> Self { + match f { + ForkchoiceUpdatedResponseStatus::Success => { + JsonForkchoiceUpdatedV1ResponseStatus::Success + } + 
ForkchoiceUpdatedResponseStatus::Syncing => { + JsonForkchoiceUpdatedV1ResponseStatus::Syncing + } + } + } +} +impl From for ForkchoiceUpdatedResponse { + fn from(j: JsonForkchoiceUpdatedV1Response) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let JsonForkchoiceUpdatedV1Response { status, payload_id } = j; + + Self { + status: status.into(), + payload_id: payload_id.map(Into::into), + } + } +} +impl From for JsonForkchoiceUpdatedV1Response { + fn from(f: ForkchoiceUpdatedResponse) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let ForkchoiceUpdatedResponse { status, payload_id } = f; + + Self { + status: status.into(), + payload_id: payload_id.map(Into::into), + } + } +} + +/// Serializes the `logs_bloom` field of an `ExecutionPayload`. +pub mod serde_logs_bloom { + use super::*; + use eth2_serde_utils::hex::PrefixedHexVisitor; + use serde::{Deserializer, Serializer}; + + pub fn serialize(bytes: &FixedVector, serializer: S) -> Result + where + S: Serializer, + U: Unsigned, + { + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes[..])); + + serializer.serialize_str(&hex_string) + } + + pub fn deserialize<'de, D, U>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + U: Unsigned, + { + let vec = deserializer.deserialize_string(PrefixedHexVisitor)?; + + FixedVector::new(vec) + .map_err(|e| serde::de::Error::custom(format!("invalid logs bloom: {:?}", e))) + } +} + +/// Serializes the `transactions` field of an `ExecutionPayload`. 
+pub mod serde_transactions { + use super::*; + use eth2_serde_utils::hex; + use serde::ser::SerializeSeq; + use serde::{de, Deserializer, Serializer}; + use std::marker::PhantomData; + + type Value = VariableList, N>; + + #[derive(Default)] + pub struct ListOfBytesListVisitor { + _phantom_m: PhantomData, + _phantom_n: PhantomData, + } + + impl<'a, M: Unsigned, N: Unsigned> serde::de::Visitor<'a> for ListOfBytesListVisitor { + type Value = Value; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a list of 0x-prefixed byte lists") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'a>, + { + let mut outer = VariableList::default(); + + while let Some(val) = seq.next_element::()? { + let inner_vec = hex::decode(&val).map_err(de::Error::custom)?; + let transaction = VariableList::new(inner_vec).map_err(|e| { + serde::de::Error::custom(format!("transaction too large: {:?}", e)) + })?; + outer.push(transaction).map_err(|e| { + serde::de::Error::custom(format!("too many transactions: {:?}", e)) + })?; + } + + Ok(outer) + } + } + + pub fn serialize( + value: &Value, + serializer: S, + ) -> Result + where + S: Serializer, + { + let mut seq = serializer.serialize_seq(Some(value.len()))?; + for transaction in value { + // It's important to match on the inner values of the transaction. Serializing the + // entire `Transaction` will result in appending the SSZ union prefix byte. The + // execution node does not want that. 
+ let hex = hex::encode(&transaction[..]); + seq.serialize_element(&hex)?; + } + seq.end() + } + + pub fn deserialize<'de, D, M: Unsigned, N: Unsigned>( + deserializer: D, + ) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let visitor: ListOfBytesListVisitor = <_>::default(); + deserializer.deserialize_any(visitor) + } +} diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs new file mode 100644 index 0000000000..5db00d37f6 --- /dev/null +++ b/beacon_node/execution_layer/src/engines.rs @@ -0,0 +1,398 @@ +//! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. + +use crate::engine_api::{EngineApi, Error as EngineApiError, PayloadAttributes, PayloadId}; +use futures::future::join_all; +use lru::LruCache; +use slog::{crit, debug, info, warn, Logger}; +use std::future::Future; +use tokio::sync::{Mutex, RwLock}; +use types::{Address, Hash256}; + +/// The number of payload IDs that will be stored for each `Engine`. +/// +/// Since the size of each value is small (~100 bytes) a large number is used for safety. +const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; + +/// Stores the remembered state of a engine. +#[derive(Copy, Clone, PartialEq)] +enum EngineState { + Synced, + Offline, + Syncing, +} + +#[derive(Copy, Clone, PartialEq, Debug)] +pub struct ForkChoiceState { + pub head_block_hash: Hash256, + pub safe_block_hash: Hash256, + pub finalized_block_hash: Hash256, +} + +/// Used to enable/disable logging on some tasks. +#[derive(Copy, Clone, PartialEq)] +pub enum Logging { + Enabled, + Disabled, +} + +impl Logging { + pub fn is_enabled(&self) -> bool { + match self { + Logging::Enabled => true, + Logging::Disabled => false, + } + } +} + +#[derive(Hash, PartialEq, std::cmp::Eq)] +struct PayloadIdCacheKey { + pub head_block_hash: Hash256, + pub timestamp: u64, + pub random: Hash256, + pub suggested_fee_recipient: Address, +} + +/// An execution engine. 
+pub struct Engine { + pub id: String, + pub api: T, + payload_id_cache: Mutex>, + state: RwLock, +} + +impl Engine { + /// Creates a new, offline engine. + pub fn new(id: String, api: T) -> Self { + Self { + id, + api, + payload_id_cache: Mutex::new(LruCache::new(PAYLOAD_ID_LRU_CACHE_SIZE)), + state: RwLock::new(EngineState::Offline), + } + } + + pub async fn get_payload_id( + &self, + head_block_hash: Hash256, + timestamp: u64, + random: Hash256, + suggested_fee_recipient: Address, + ) -> Option { + self.payload_id_cache + .lock() + .await + .get(&PayloadIdCacheKey { + head_block_hash, + timestamp, + random, + suggested_fee_recipient, + }) + .cloned() + } +} + +impl Engine { + pub async fn notify_forkchoice_updated( + &self, + forkchoice_state: ForkChoiceState, + payload_attributes: Option, + log: &Logger, + ) -> Result, EngineApiError> { + let response = self + .api + .forkchoice_updated_v1(forkchoice_state, payload_attributes) + .await?; + + if let Some(payload_id) = response.payload_id { + if let Some(key) = + payload_attributes.map(|pa| PayloadIdCacheKey::new(&forkchoice_state, &pa)) + { + self.payload_id_cache.lock().await.put(key, payload_id); + } else { + debug!( + log, + "Engine returned unexpected payload_id"; + "payload_id" => ?payload_id + ); + } + } + + Ok(response.payload_id) + } +} + +/// Holds multiple execution engines and provides functionality for managing them in a fallback +/// manner. 
+pub struct Engines { + pub engines: Vec>, + pub latest_forkchoice_state: RwLock>, + pub log: Logger, +} + +#[derive(Debug)] +pub enum EngineError { + Offline { id: String }, + Api { id: String, error: EngineApiError }, +} + +impl Engines { + async fn get_latest_forkchoice_state(&self) -> Option { + *self.latest_forkchoice_state.read().await + } + + pub async fn set_latest_forkchoice_state(&self, state: ForkChoiceState) { + *self.latest_forkchoice_state.write().await = Some(state); + } + + async fn send_latest_forkchoice_state(&self, engine: &Engine) { + let latest_forkchoice_state = self.get_latest_forkchoice_state().await; + + if let Some(forkchoice_state) = latest_forkchoice_state { + info!( + self.log, + "Issuing forkchoiceUpdated"; + "forkchoice_state" => ?forkchoice_state, + "id" => &engine.id, + ); + + // For simplicity, payload attributes are never included in this call. It may be + // reasonable to include them in the future. + if let Err(e) = engine + .api + .forkchoice_updated_v1(forkchoice_state, None) + .await + { + debug!( + self.log, + "Failed to issue latest head to engine"; + "error" => ?e, + "id" => &engine.id, + ); + } + } else { + debug!( + self.log, + "No head, not sending to engine"; + "id" => &engine.id, + ); + } + } + + /// Returns `true` if there is at least one engine with a "synced" status. + pub async fn any_synced(&self) -> bool { + for engine in &self.engines { + if *engine.state.read().await == EngineState::Synced { + return true; + } + } + false + } + + /// Run the `EngineApi::upcheck` function on all nodes which are currently offline. + /// + /// This can be used to try and recover any offline nodes. 
+ pub async fn upcheck_not_synced(&self, logging: Logging) { + let upcheck_futures = self.engines.iter().map(|engine| async move { + let mut state_lock = engine.state.write().await; + if *state_lock != EngineState::Synced { + match engine.api.upcheck().await { + Ok(()) => { + if logging.is_enabled() { + info!( + self.log, + "Execution engine online"; + "id" => &engine.id + ); + } + + // Send the node our latest forkchoice_state. + self.send_latest_forkchoice_state(engine).await; + + *state_lock = EngineState::Synced + } + Err(EngineApiError::IsSyncing) => { + if logging.is_enabled() { + warn!( + self.log, + "Execution engine syncing"; + "id" => &engine.id + ) + } + + // Send the node our latest forkchoice_state, it may assist with syncing. + self.send_latest_forkchoice_state(engine).await; + + *state_lock = EngineState::Syncing + } + Err(e) => { + if logging.is_enabled() { + warn!( + self.log, + "Execution engine offline"; + "error" => ?e, + "id" => &engine.id + ) + } + } + } + } + *state_lock + }); + + let num_synced = join_all(upcheck_futures) + .await + .into_iter() + .filter(|state: &EngineState| *state == EngineState::Synced) + .count(); + + if num_synced == 0 && logging.is_enabled() { + crit!( + self.log, + "No synced execution engines"; + ) + } + } + + /// Run `func` on all engines, in the order in which they are defined, returning the first + /// successful result that is found. + /// + /// This function might try to run `func` twice. If all nodes return an error on the first time + /// it runs, it will try to upcheck all offline nodes and then run the function again. + pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result> + where + F: Fn(&'a Engine) -> G + Copy, + G: Future>, + { + match self.first_success_without_retry(func).await { + Ok(result) => Ok(result), + Err(mut first_errors) => { + // Try to recover some nodes. + self.upcheck_not_synced(Logging::Enabled).await; + // Retry the call on all nodes. 
+ match self.first_success_without_retry(func).await { + Ok(result) => Ok(result), + Err(second_errors) => { + first_errors.extend(second_errors); + Err(first_errors) + } + } + } + } + } + + /// Run `func` on all engines, in the order in which they are defined, returning the first + /// successful result that is found. + async fn first_success_without_retry<'a, F, G, H>( + &'a self, + func: F, + ) -> Result> + where + F: Fn(&'a Engine) -> G, + G: Future>, + { + let mut errors = vec![]; + + for engine in &self.engines { + let engine_synced = *engine.state.read().await == EngineState::Synced; + if engine_synced { + match func(engine).await { + Ok(result) => return Ok(result), + Err(error) => { + debug!( + self.log, + "Execution engine call failed"; + "error" => ?error, + "id" => &engine.id + ); + *engine.state.write().await = EngineState::Offline; + errors.push(EngineError::Api { + id: engine.id.clone(), + error, + }) + } + } + } else { + errors.push(EngineError::Offline { + id: engine.id.clone(), + }) + } + } + + Err(errors) + } + + /// Runs `func` on all nodes concurrently, returning all results. Any nodes that are offline + /// will be ignored, however all synced or unsynced nodes will receive the broadcast. + /// + /// This function might try to run `func` twice. If all nodes return an error on the first time + /// it runs, it will try to upcheck all offline nodes and then run the function again. + pub async fn broadcast<'a, F, G, H>(&'a self, func: F) -> Vec> + where + F: Fn(&'a Engine) -> G + Copy, + G: Future>, + { + let first_results = self.broadcast_without_retry(func).await; + + let mut any_offline = false; + for result in &first_results { + match result { + Ok(_) => return first_results, + Err(EngineError::Offline { .. 
}) => any_offline = true, + _ => (), + } + } + + if any_offline { + self.upcheck_not_synced(Logging::Enabled).await; + self.broadcast_without_retry(func).await + } else { + first_results + } + } + + /// Runs `func` on all nodes concurrently, returning all results. + pub async fn broadcast_without_retry<'a, F, G, H>( + &'a self, + func: F, + ) -> Vec> + where + F: Fn(&'a Engine) -> G, + G: Future>, + { + let func = &func; + let futures = self.engines.iter().map(|engine| async move { + let is_offline = *engine.state.read().await == EngineState::Offline; + if !is_offline { + func(engine).await.map_err(|error| { + debug!( + self.log, + "Execution engine call failed"; + "error" => ?error, + "id" => &engine.id + ); + EngineError::Api { + id: engine.id.clone(), + error, + } + }) + } else { + Err(EngineError::Offline { + id: engine.id.clone(), + }) + } + }); + + join_all(futures).await + } +} + +impl PayloadIdCacheKey { + fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { + Self { + head_block_hash: state.head_block_hash, + timestamp: attributes.timestamp, + random: attributes.random, + suggested_fee_recipient: attributes.suggested_fee_recipient, + } + } +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs new file mode 100644 index 0000000000..5c069f0b0b --- /dev/null +++ b/beacon_node/execution_layer/src/lib.rs @@ -0,0 +1,759 @@ +//! This crate provides an abstraction over one or more *execution engines*. An execution engine +//! was formerly known as an "eth1 node", like Geth, Nethermind, Erigon, etc. +//! +//! This crate only provides useful functionality for "The Merge", it does not provide any of the +//! deposit-contract functionality that the `beacon_node/eth1` crate already provides. 
+ +use engine_api::{Error as ApiError, *}; +use engines::{Engine, EngineError, Engines, ForkChoiceState, Logging}; +use lru::LruCache; +use sensitive_url::SensitiveUrl; +use slog::{crit, debug, error, info, Logger}; +use slot_clock::SlotClock; +use std::future::Future; +use std::sync::Arc; +use std::time::Duration; +use task_executor::TaskExecutor; +use tokio::{ + sync::{Mutex, MutexGuard}, + time::{sleep, sleep_until, Instant}, +}; +use types::ChainSpec; + +pub use engine_api::{http::HttpJsonRpc, ExecutePayloadResponseStatus}; + +mod engine_api; +mod engines; +pub mod test_utils; + +/// Each time the `ExecutionLayer` retrieves a block from an execution node, it stores that block +/// in an LRU cache to avoid redundant lookups. This is the size of that cache. +const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128; + +#[derive(Debug)] +pub enum Error { + NoEngines, + ApiError(ApiError), + EngineErrors(Vec), + NotSynced, + ShuttingDown, + FeeRecipientUnspecified, +} + +impl From for Error { + fn from(e: ApiError) -> Self { + Error::ApiError(e) + } +} + +struct Inner { + engines: Engines, + suggested_fee_recipient: Option
, + execution_blocks: Mutex>, + executor: TaskExecutor, + log: Logger, +} + +/// Provides access to one or more execution engines and provides a neat interface for consumption +/// by the `BeaconChain`. +/// +/// When there is more than one execution node specified, the others will be used in a "fallback" +/// fashion. Some requests may be broadcast to all nodes and others might only be sent to the first +/// node that returns a valid response. Ultimately, the purpose of fallback nodes is to provide +/// redundancy in the case where one node is offline. +/// +/// The fallback nodes have an ordering. The first supplied will be the first contacted, and so on. +#[derive(Clone)] +pub struct ExecutionLayer { + inner: Arc, +} + +impl ExecutionLayer { + /// Instantiate `Self` with `urls.len()` engines, all using the JSON-RPC via HTTP. + pub fn from_urls( + urls: Vec, + suggested_fee_recipient: Option
, + executor: TaskExecutor, + log: Logger, + ) -> Result { + if urls.is_empty() { + return Err(Error::NoEngines); + } + + let engines = urls + .into_iter() + .map(|url| { + let id = url.to_string(); + let api = HttpJsonRpc::new(url)?; + Ok(Engine::new(id, api)) + }) + .collect::>()?; + + let inner = Inner { + engines: Engines { + engines, + latest_forkchoice_state: <_>::default(), + log: log.clone(), + }, + suggested_fee_recipient, + execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), + executor, + log, + }; + + Ok(Self { + inner: Arc::new(inner), + }) + } +} + +impl ExecutionLayer { + fn engines(&self) -> &Engines { + &self.inner.engines + } + + fn executor(&self) -> &TaskExecutor { + &self.inner.executor + } + + fn suggested_fee_recipient(&self) -> Result { + self.inner + .suggested_fee_recipient + .ok_or(Error::FeeRecipientUnspecified) + } + + /// Note: this function returns a mutex guard, be careful to avoid deadlocks. + async fn execution_blocks(&self) -> MutexGuard<'_, LruCache> { + self.inner.execution_blocks.lock().await + } + + fn log(&self) -> &Logger { + &self.inner.log + } + + /// Convenience function to allow calling async functions in a non-async context. + pub fn block_on<'a, T, U, V>(&'a self, generate_future: T) -> Result + where + T: Fn(&'a Self) -> U, + U: Future>, + { + let runtime = self + .executor() + .runtime() + .upgrade() + .ok_or(Error::ShuttingDown)?; + // TODO(merge): respect the shutdown signal. + runtime.block_on(generate_future(self)) + } + + /// Convenience function to allow calling async functions in a non-async context. + /// + /// The function is "generic" since it does not enforce a particular return type on + /// `generate_future`. + pub fn block_on_generic<'a, T, U, V>(&'a self, generate_future: T) -> Result + where + T: Fn(&'a Self) -> U, + U: Future, + { + let runtime = self + .executor() + .runtime() + .upgrade() + .ok_or(Error::ShuttingDown)?; + // TODO(merge): respect the shutdown signal. 
+ Ok(runtime.block_on(generate_future(self))) + } + + /// Convenience function to allow spawning a task without waiting for the result. + pub fn spawn(&self, generate_future: T, name: &'static str) + where + T: FnOnce(Self) -> U, + U: Future + Send + 'static, + { + self.executor().spawn(generate_future(self.clone()), name); + } + + /// Spawns a routine which attempts to keep the execution engines online. + pub fn spawn_watchdog_routine(&self, slot_clock: S) { + let watchdog = |el: ExecutionLayer| async move { + // Run one task immediately. + el.watchdog_task().await; + + let recurring_task = + |el: ExecutionLayer, now: Instant, duration_to_next_slot: Duration| async move { + // We run the task three times per slot. + // + // The interval between each task is 1/3rd of the slot duration. This matches nicely + // with the attestation production times (unagg. at 1/3rd, agg at 2/3rd). + // + // Each task is offset by 3/4ths of the interval. + // + // On mainnet, this means we will run tasks at: + // + // - 3s after slot start: 1s before publishing unaggregated attestations. + // - 7s after slot start: 1s before publishing aggregated attestations. + // - 11s after slot start: 1s before the next slot starts. + let interval = duration_to_next_slot / 3; + let offset = (interval / 4) * 3; + + let first_execution = duration_to_next_slot + offset; + let second_execution = first_execution + interval; + let third_execution = second_execution + interval; + + sleep_until(now + first_execution).await; + el.engines().upcheck_not_synced(Logging::Disabled).await; + + sleep_until(now + second_execution).await; + el.engines().upcheck_not_synced(Logging::Disabled).await; + + sleep_until(now + third_execution).await; + el.engines().upcheck_not_synced(Logging::Disabled).await; + }; + + // Start the loop to periodically update. + loop { + if let Some(duration) = slot_clock.duration_to_next_slot() { + let now = Instant::now(); + + // Spawn a new task rather than waiting for this to finish. 
This ensure that a + // slow run doesn't prevent the next run from starting. + el.spawn(|el| recurring_task(el, now, duration), "exec_watchdog_task"); + } else { + error!(el.log(), "Failed to spawn watchdog task"); + } + sleep(slot_clock.slot_duration()).await; + } + }; + + self.spawn(watchdog, "exec_watchdog"); + } + + /// Performs a single execution of the watchdog routine. + async fn watchdog_task(&self) { + // Disable logging since this runs frequently and may get annoying. + self.engines().upcheck_not_synced(Logging::Disabled).await; + } + + /// Returns `true` if there is at least one synced and reachable engine. + pub async fn is_synced(&self) -> bool { + self.engines().any_synced().await + } + + /// Maps to the `engine_getPayload` JSON-RPC call. + /// + /// However, it will attempt to call `self.prepare_payload` if it cannot find an existing + /// payload id for the given parameters. + /// + /// ## Fallback Behavior + /// + /// The result will be returned from the first node that returns successfully. No more nodes + /// will be contacted. + pub async fn get_payload( + &self, + parent_hash: Hash256, + timestamp: u64, + random: Hash256, + finalized_block_hash: Hash256, + ) -> Result, Error> { + let suggested_fee_recipient = self.suggested_fee_recipient()?; + debug!( + self.log(), + "Issuing engine_getPayload"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "random" => ?random, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + self.engines() + .first_success(|engine| async move { + let payload_id = if let Some(id) = engine + .get_payload_id(parent_hash, timestamp, random, suggested_fee_recipient) + .await + { + // The payload id has been cached for this engine. + id + } else { + // The payload id has *not* been cached for this engine. Trigger an artificial + // fork choice update to retrieve a payload ID. 
+ // + // TODO(merge): a better algorithm might try to favour a node that already had a + // cached payload id, since a payload that has had more time to produce is + // likely to be more profitable. + let fork_choice_state = ForkChoiceState { + head_block_hash: parent_hash, + safe_block_hash: parent_hash, + finalized_block_hash, + }; + let payload_attributes = PayloadAttributes { + timestamp, + random, + suggested_fee_recipient, + }; + + engine + .notify_forkchoice_updated( + fork_choice_state, + Some(payload_attributes), + self.log(), + ) + .await? + .ok_or(ApiError::PayloadIdUnavailable)? + }; + + engine.api.get_payload_v1(payload_id).await + }) + .await + .map_err(Error::EngineErrors) + } + + /// Maps to the `engine_executePayload` JSON-RPC call. + /// + /// ## Fallback Behaviour + /// + /// The request will be broadcast to all nodes, simultaneously. It will await a response (or + /// failure) from all nodes and then return based on the first of these conditions which + /// returns true: + /// + /// - Valid, if any nodes return valid. + /// - Invalid, if any nodes return invalid. + /// - Syncing, if any nodes return syncing. + /// - An error, if all nodes return an error. 
+ pub async fn execute_payload( + &self, + execution_payload: &ExecutionPayload, + ) -> Result<(ExecutePayloadResponseStatus, Option), Error> { + debug!( + self.log(), + "Issuing engine_executePayload"; + "parent_hash" => ?execution_payload.parent_hash, + "block_hash" => ?execution_payload.block_hash, + "block_number" => execution_payload.block_number, + ); + + let broadcast_results = self + .engines() + .broadcast(|engine| engine.api.execute_payload_v1(execution_payload.clone())) + .await; + + let mut errors = vec![]; + let mut valid = 0; + let mut invalid = 0; + let mut syncing = 0; + let mut invalid_latest_valid_hash = vec![]; + for result in broadcast_results { + match result.map(|response| (response.latest_valid_hash, response.status)) { + Ok((Some(latest_hash), ExecutePayloadResponseStatus::Valid)) => { + if latest_hash == execution_payload.block_hash { + valid += 1; + } else { + invalid += 1; + errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse( + format!( + "execute_payload: response.status = Valid but invalid latest_valid_hash. 
Expected({:?}) Found({:?})", + execution_payload.block_hash, + latest_hash, + ) + ), + }); + invalid_latest_valid_hash.push(latest_hash); + } + } + Ok((Some(latest_hash), ExecutePayloadResponseStatus::Invalid)) => { + invalid += 1; + invalid_latest_valid_hash.push(latest_hash); + } + Ok((_, ExecutePayloadResponseStatus::Syncing)) => syncing += 1, + Ok((None, status)) => errors.push(EngineError::Api { + id: "unknown".to_string(), + error: engine_api::Error::BadResponse(format!( + "execute_payload: status {:?} returned with null latest_valid_hash", + status + )), + }), + Err(e) => errors.push(e), + } + } + + if valid > 0 && invalid > 0 { + crit!( + self.log(), + "Consensus failure between execution nodes"; + "method" => "execute_payload" + ); + } + + if valid > 0 { + Ok(( + ExecutePayloadResponseStatus::Valid, + Some(execution_payload.block_hash), + )) + } else if invalid > 0 { + Ok((ExecutePayloadResponseStatus::Invalid, None)) + } else if syncing > 0 { + Ok((ExecutePayloadResponseStatus::Syncing, None)) + } else { + Err(Error::EngineErrors(errors)) + } + } + + /// Maps to the `engine_consensusValidated` JSON-RPC call. + /// + /// ## Fallback Behaviour + /// + /// The request will be broadcast to all nodes, simultaneously. It will await a response (or + /// failure) from all nodes and then return based on the first of these conditions which + /// returns true: + /// + /// - Ok, if any node returns successfully. + /// - An error, if all nodes return an error. 
+ pub async fn notify_forkchoice_updated( + &self, + head_block_hash: Hash256, + finalized_block_hash: Hash256, + payload_attributes: Option, + ) -> Result<(), Error> { + debug!( + self.log(), + "Issuing engine_forkchoiceUpdated"; + "finalized_block_hash" => ?finalized_block_hash, + "head_block_hash" => ?head_block_hash, + ); + + // see https://hackmd.io/@n0ble/kintsugi-spec#Engine-API + // for now, we must set safe_block_hash = head_block_hash + let forkchoice_state = ForkChoiceState { + head_block_hash, + safe_block_hash: head_block_hash, + finalized_block_hash, + }; + + self.engines() + .set_latest_forkchoice_state(forkchoice_state) + .await; + + let broadcast_results = self + .engines() + .broadcast(|engine| async move { + engine + .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) + .await + }) + .await; + + if broadcast_results.iter().any(Result::is_ok) { + Ok(()) + } else { + let errors = broadcast_results + .into_iter() + .filter_map(Result::err) + .collect(); + Err(Error::EngineErrors(errors)) + } + } + + /// Used during block production to determine if the merge has been triggered. + /// + /// ## Specification + /// + /// `get_terminal_pow_block_hash` + /// + /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md + pub async fn get_terminal_pow_block_hash( + &self, + spec: &ChainSpec, + ) -> Result, Error> { + let hash_opt = self + .engines() + .first_success(|engine| async move { + let terminal_block_hash = spec.terminal_block_hash; + if terminal_block_hash != Hash256::zero() { + if self + .get_pow_block(engine, terminal_block_hash) + .await? 
+ .is_some() + { + return Ok(Some(terminal_block_hash)); + } else { + return Ok(None); + } + } + + self.get_pow_block_hash_at_total_difficulty(engine, spec) + .await + }) + .await + .map_err(Error::EngineErrors)?; + + if let Some(hash) = &hash_opt { + info!( + self.log(), + "Found terminal block hash"; + "terminal_block_hash_override" => ?spec.terminal_block_hash, + "terminal_total_difficulty" => ?spec.terminal_total_difficulty, + "block_hash" => ?hash, + ); + } + + Ok(hash_opt) + } + + /// This function should remain internal. External users should use + /// `self.get_terminal_pow_block` instead, since it checks against the terminal block hash + /// override. + /// + /// ## Specification + /// + /// `get_pow_block_at_terminal_total_difficulty` + /// + /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md + async fn get_pow_block_hash_at_total_difficulty( + &self, + engine: &Engine, + spec: &ChainSpec, + ) -> Result, ApiError> { + let mut block = engine + .api + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) + .await? + .ok_or(ApiError::ExecutionHeadBlockNotFound)?; + + self.execution_blocks().await.put(block.block_hash, block); + + loop { + let block_reached_ttd = block.total_difficulty >= spec.terminal_total_difficulty; + if block_reached_ttd { + if block.parent_hash == Hash256::zero() { + return Ok(Some(block.block_hash)); + } + let parent = self + .get_pow_block(engine, block.parent_hash) + .await? + .ok_or(ApiError::ExecutionBlockNotFound(block.parent_hash))?; + let parent_reached_ttd = parent.total_difficulty >= spec.terminal_total_difficulty; + + if block_reached_ttd && !parent_reached_ttd { + return Ok(Some(block.block_hash)); + } else { + block = parent; + } + } else { + return Ok(None); + } + } + } + + /// Used during block verification to check that a block correctly triggers the merge. + /// + /// ## Returns + /// + /// - `Some(true)` if the given `block_hash` is the terminal proof-of-work block. 
+ /// - `Some(false)` if the given `block_hash` is certainly *not* the terminal proof-of-work + /// block. + /// - `None` if the `block_hash` or its parent were not present on the execution engines. + /// - `Err(_)` if there was an error connecting to the execution engines. + /// + /// ## Fallback Behaviour + /// + /// The request will be broadcast to all nodes, simultaneously. It will await a response (or + /// failure) from all nodes and then return based on the first of these conditions which + /// returns true: + /// + /// - Terminal, if any node indicates it is terminal. + /// - Not terminal, if any node indicates it is non-terminal. + /// - Block not found, if any node cannot find the block. + /// - An error, if all nodes return an error. + /// + /// ## Specification + /// + /// `is_valid_terminal_pow_block` + /// + /// https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/fork-choice.md + pub async fn is_valid_terminal_pow_block_hash( + &self, + block_hash: Hash256, + spec: &ChainSpec, + ) -> Result, Error> { + let broadcast_results = self + .engines() + .broadcast(|engine| async move { + if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? { + if let Some(pow_parent) = + self.get_pow_block(engine, pow_block.parent_hash).await? 
+ { + return Ok(Some( + self.is_valid_terminal_pow_block(pow_block, pow_parent, spec), + )); + } + } + + Ok(None) + }) + .await; + + let mut errors = vec![]; + let mut terminal = 0; + let mut not_terminal = 0; + let mut block_missing = 0; + for result in broadcast_results { + match result { + Ok(Some(true)) => terminal += 1, + Ok(Some(false)) => not_terminal += 1, + Ok(None) => block_missing += 1, + Err(e) => errors.push(e), + } + } + + if terminal > 0 && not_terminal > 0 { + crit!( + self.log(), + "Consensus failure between execution nodes"; + "method" => "is_valid_terminal_pow_block_hash" + ); + } + + if terminal > 0 { + Ok(Some(true)) + } else if not_terminal > 0 { + Ok(Some(false)) + } else if block_missing > 0 { + Ok(None) + } else { + Err(Error::EngineErrors(errors)) + } + } + + /// This function should remain internal. + /// + /// External users should use `self.is_valid_terminal_pow_block_hash`. + fn is_valid_terminal_pow_block( + &self, + block: ExecutionBlock, + parent: ExecutionBlock, + spec: &ChainSpec, + ) -> bool { + let is_total_difficulty_reached = block.total_difficulty >= spec.terminal_total_difficulty; + let is_parent_total_difficulty_valid = + parent.total_difficulty < spec.terminal_total_difficulty; + is_total_difficulty_reached && is_parent_total_difficulty_valid + } + + /// Maps to the `eth_getBlockByHash` JSON-RPC call. + /// + /// ## TODO(merge) + /// + /// This will return an execution block regardless of whether or not it was created by a PoW + /// miner (pre-merge) or a PoS validator (post-merge). It's not immediately clear if this is + /// correct or not, see the discussion here: + /// + /// https://github.com/ethereum/consensus-specs/issues/2636 + async fn get_pow_block( + &self, + engine: &Engine, + hash: Hash256, + ) -> Result, ApiError> { + if let Some(cached) = self.execution_blocks().await.get(&hash).copied() { + // The block was in the cache, no need to request it from the execution + // engine. 
+ return Ok(Some(cached)); + } + + // The block was *not* in the cache, request it from the execution + // engine and cache it for future reference. + if let Some(block) = engine.api.get_block_by_hash(hash).await? { + self.execution_blocks().await.put(hash, block); + Ok(Some(block)) + } else { + Ok(None) + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::test_utils::MockExecutionLayer as GenericMockExecutionLayer; + use types::MainnetEthSpec; + + type MockExecutionLayer = GenericMockExecutionLayer; + + #[tokio::test] + async fn produce_three_valid_pos_execution_blocks() { + MockExecutionLayer::default_params() + .move_to_terminal_block() + .produce_valid_execution_payload_on_head() + .await + .produce_valid_execution_payload_on_head() + .await + .produce_valid_execution_payload_on_head() + .await; + } + + #[tokio::test] + async fn finds_valid_terminal_block_hash() { + MockExecutionLayer::default_params() + .move_to_block_prior_to_terminal_block() + .with_terminal_block(|spec, el, _| async move { + assert_eq!(el.get_terminal_pow_block_hash(&spec).await.unwrap(), None) + }) + .await + .move_to_terminal_block() + .with_terminal_block(|spec, el, terminal_block| async move { + assert_eq!( + el.get_terminal_pow_block_hash(&spec).await.unwrap(), + Some(terminal_block.unwrap().block_hash) + ) + }) + .await; + } + + #[tokio::test] + async fn verifies_valid_terminal_block_hash() { + MockExecutionLayer::default_params() + .move_to_terminal_block() + .with_terminal_block(|spec, el, terminal_block| async move { + assert_eq!( + el.is_valid_terminal_pow_block_hash(terminal_block.unwrap().block_hash, &spec) + .await + .unwrap(), + Some(true) + ) + }) + .await; + } + + #[tokio::test] + async fn rejects_invalid_terminal_block_hash() { + MockExecutionLayer::default_params() + .move_to_terminal_block() + .with_terminal_block(|spec, el, terminal_block| async move { + let invalid_terminal_block = terminal_block.unwrap().parent_hash; + + assert_eq!( + 
el.is_valid_terminal_pow_block_hash(invalid_terminal_block, &spec) + .await + .unwrap(), + Some(false) + ) + }) + .await; + } + + #[tokio::test] + async fn rejects_unknown_terminal_block_hash() { + MockExecutionLayer::default_params() + .move_to_terminal_block() + .with_terminal_block(|spec, el, _| async move { + let missing_terminal_block = Hash256::repeat_byte(42); + + assert_eq!( + el.is_valid_terminal_pow_block_hash(missing_terminal_block, &spec) + .await + .unwrap(), + None + ) + }) + .await; + } +} diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs new file mode 100644 index 0000000000..552bea0ea4 --- /dev/null +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -0,0 +1,454 @@ +use crate::engine_api::{ + ExecutePayloadResponse, ExecutePayloadResponseStatus, ExecutionBlock, PayloadAttributes, + PayloadId, +}; +use crate::engines::ForkChoiceState; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; +use types::{EthSpec, ExecutionPayload, Hash256, Uint256}; + +const GAS_LIMIT: u64 = 16384; +const GAS_USED: u64 = GAS_LIMIT - 1; + +#[derive(Clone, Debug, PartialEq)] +#[allow(clippy::large_enum_variant)] // This struct is only for testing. 
+pub enum Block { + PoW(PoWBlock), + PoS(ExecutionPayload), +} + +impl Block { + pub fn block_number(&self) -> u64 { + match self { + Block::PoW(block) => block.block_number, + Block::PoS(payload) => payload.block_number, + } + } + + pub fn parent_hash(&self) -> Hash256 { + match self { + Block::PoW(block) => block.parent_hash, + Block::PoS(payload) => payload.parent_hash, + } + } + + pub fn block_hash(&self) -> Hash256 { + match self { + Block::PoW(block) => block.block_hash, + Block::PoS(payload) => payload.block_hash, + } + } + + pub fn total_difficulty(&self) -> Option { + match self { + Block::PoW(block) => Some(block.total_difficulty), + Block::PoS(_) => None, + } + } + + pub fn as_execution_block(&self, total_difficulty: Uint256) -> ExecutionBlock { + match self { + Block::PoW(block) => ExecutionBlock { + block_hash: block.block_hash, + block_number: block.block_number, + parent_hash: block.parent_hash, + total_difficulty: block.total_difficulty, + }, + Block::PoS(payload) => ExecutionBlock { + block_hash: payload.block_hash, + block_number: payload.block_number, + parent_hash: payload.parent_hash, + total_difficulty, + }, + } + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize, TreeHash)] +#[serde(rename_all = "camelCase")] +pub struct PoWBlock { + pub block_number: u64, + pub block_hash: Hash256, + pub parent_hash: Hash256, + pub total_difficulty: Uint256, +} + +pub struct ExecutionBlockGenerator { + /* + * Common database + */ + blocks: HashMap>, + block_hashes: HashMap, + /* + * PoW block parameters + */ + pub terminal_total_difficulty: Uint256, + pub terminal_block_number: u64, + pub terminal_block_hash: Hash256, + /* + * PoS block parameters + */ + pub pending_payloads: HashMap>, + pub next_payload_id: u64, + pub payload_ids: HashMap>, +} + +impl ExecutionBlockGenerator { + pub fn new( + terminal_total_difficulty: Uint256, + terminal_block_number: u64, + terminal_block_hash: Hash256, + ) -> Self { + let mut gen = Self { + blocks: 
<_>::default(), + block_hashes: <_>::default(), + terminal_total_difficulty, + terminal_block_number, + terminal_block_hash, + pending_payloads: <_>::default(), + next_payload_id: 0, + payload_ids: <_>::default(), + }; + + gen.insert_pow_block(0).unwrap(); + + gen + } + + pub fn latest_block(&self) -> Option> { + let hash = *self + .block_hashes + .iter() + .max_by_key(|(number, _)| *number) + .map(|(_, hash)| hash)?; + + self.block_by_hash(hash) + } + + pub fn latest_execution_block(&self) -> Option { + self.latest_block() + .map(|block| block.as_execution_block(self.terminal_total_difficulty)) + } + + pub fn block_by_number(&self, number: u64) -> Option> { + let hash = *self.block_hashes.get(&number)?; + self.block_by_hash(hash) + } + + pub fn execution_block_by_number(&self, number: u64) -> Option { + self.block_by_number(number) + .map(|block| block.as_execution_block(self.terminal_total_difficulty)) + } + + pub fn block_by_hash(&self, hash: Hash256) -> Option> { + self.blocks.get(&hash).cloned() + } + + pub fn execution_block_by_hash(&self, hash: Hash256) -> Option { + self.block_by_hash(hash) + .map(|block| block.as_execution_block(self.terminal_total_difficulty)) + } + + pub fn move_to_block_prior_to_terminal_block(&mut self) -> Result<(), String> { + let target_block = self + .terminal_block_number + .checked_sub(1) + .ok_or("terminal pow block is 0")?; + self.move_to_pow_block(target_block) + } + + pub fn move_to_terminal_block(&mut self) -> Result<(), String> { + self.move_to_pow_block(self.terminal_block_number) + } + + pub fn move_to_pow_block(&mut self, target_block: u64) -> Result<(), String> { + let next_block = self.latest_block().unwrap().block_number() + 1; + assert!(target_block >= next_block); + + self.insert_pow_blocks(next_block..=target_block) + } + + pub fn drop_all_blocks(&mut self) { + self.blocks = <_>::default(); + self.block_hashes = <_>::default(); + } + + pub fn insert_pow_blocks( + &mut self, + block_numbers: impl Iterator, + ) -> 
Result<(), String> { + for i in block_numbers { + self.insert_pow_block(i)?; + } + + Ok(()) + } + + pub fn insert_pow_block(&mut self, block_number: u64) -> Result<(), String> { + let parent_hash = if block_number == 0 { + Hash256::zero() + } else if let Some(hash) = self.block_hashes.get(&(block_number - 1)) { + *hash + } else { + return Err(format!( + "parent with block number {} not found", + block_number - 1 + )); + }; + + let block = generate_pow_block( + self.terminal_total_difficulty, + self.terminal_block_number, + block_number, + parent_hash, + )?; + + self.insert_block(Block::PoW(block)) + } + + pub fn insert_block(&mut self, block: Block) -> Result<(), String> { + if self.blocks.contains_key(&block.block_hash()) { + return Err(format!("{:?} is already known", block.block_hash())); + } else if self.block_hashes.contains_key(&block.block_number()) { + return Err(format!( + "block {} is already known, forking is not supported", + block.block_number() + )); + } else if block.block_number() != 0 && !self.blocks.contains_key(&block.parent_hash()) { + return Err(format!("parent block {:?} is unknown", block.parent_hash())); + } + + self.insert_block_without_checks(block) + } + + pub fn insert_block_without_checks(&mut self, block: Block) -> Result<(), String> { + self.block_hashes + .insert(block.block_number(), block.block_hash()); + self.blocks.insert(block.block_hash(), block); + + Ok(()) + } + + pub fn get_payload(&mut self, id: &PayloadId) -> Option> { + self.payload_ids.remove(id) + } + + pub fn execute_payload(&mut self, payload: ExecutionPayload) -> ExecutePayloadResponse { + let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash) { + parent + } else { + return ExecutePayloadResponse { + status: ExecutePayloadResponseStatus::Syncing, + latest_valid_hash: None, + validation_error: None, + }; + }; + + if payload.block_number != parent.block_number() + 1 { + return ExecutePayloadResponse { + status: ExecutePayloadResponseStatus::Invalid, + 
latest_valid_hash: Some(parent.block_hash()), + validation_error: Some("invalid block number".to_string()), + }; + } + + let valid_hash = payload.block_hash; + self.pending_payloads.insert(payload.block_hash, payload); + + ExecutePayloadResponse { + status: ExecutePayloadResponseStatus::Valid, + latest_valid_hash: Some(valid_hash), + validation_error: None, + } + } + + pub fn forkchoice_updated_v1( + &mut self, + forkchoice_state: ForkChoiceState, + payload_attributes: Option, + ) -> Result, String> { + if let Some(payload) = self + .pending_payloads + .remove(&forkchoice_state.head_block_hash) + { + self.insert_block(Block::PoS(payload))?; + } + if !self.blocks.contains_key(&forkchoice_state.head_block_hash) { + return Err(format!( + "block hash {:?} unknown", + forkchoice_state.head_block_hash + )); + } + if !self.blocks.contains_key(&forkchoice_state.safe_block_hash) { + return Err(format!( + "block hash {:?} unknown", + forkchoice_state.head_block_hash + )); + } + + if forkchoice_state.finalized_block_hash != Hash256::zero() + && !self + .blocks + .contains_key(&forkchoice_state.finalized_block_hash) + { + return Err(format!( + "finalized block hash {:?} is unknown", + forkchoice_state.finalized_block_hash + )); + } + + match payload_attributes { + None => Ok(None), + Some(attributes) => { + if !self.blocks.iter().any(|(_, block)| { + block.block_hash() == self.terminal_block_hash + || block.block_number() == self.terminal_block_number + }) { + return Err("refusing to create payload id before terminal block".to_string()); + } + + let parent = self + .blocks + .get(&forkchoice_state.head_block_hash) + .ok_or_else(|| { + format!( + "unknown parent block {:?}", + forkchoice_state.head_block_hash + ) + })?; + + let id = payload_id_from_u64(self.next_payload_id); + self.next_payload_id += 1; + + let mut execution_payload = ExecutionPayload { + parent_hash: forkchoice_state.head_block_hash, + fee_recipient: attributes.suggested_fee_recipient, + receipt_root: 
Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + random: attributes.random, + block_number: parent.block_number() + 1, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: attributes.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: Hash256::zero(), + transactions: vec![].into(), + }; + + execution_payload.block_hash = execution_payload.tree_hash_root(); + + self.payload_ids.insert(id, execution_payload); + + Ok(Some(id)) + } + } + } +} + +fn payload_id_from_u64(n: u64) -> PayloadId { + n.to_le_bytes() +} + +pub fn generate_pow_block( + terminal_total_difficulty: Uint256, + terminal_block_number: u64, + block_number: u64, + parent_hash: Hash256, +) -> Result { + if block_number > terminal_block_number { + return Err(format!( + "{} is beyond terminal pow block {}", + block_number, terminal_block_number + )); + } + + let total_difficulty = if block_number == terminal_block_number { + terminal_total_difficulty + } else { + let increment = terminal_total_difficulty + .checked_div(Uint256::from(terminal_block_number)) + .expect("terminal block number must be non-zero"); + increment + .checked_mul(Uint256::from(block_number)) + .expect("overflow computing total difficulty") + }; + + let mut block = PoWBlock { + block_number, + block_hash: Hash256::zero(), + parent_hash, + total_difficulty, + }; + + block.block_hash = block.tree_hash_root(); + + Ok(block) +} + +#[cfg(test)] +mod test { + use super::*; + use types::MainnetEthSpec; + + #[test] + fn pow_chain_only() { + const TERMINAL_DIFFICULTY: u64 = 10; + const TERMINAL_BLOCK: u64 = 10; + const DIFFICULTY_INCREMENT: u64 = 1; + + let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( + TERMINAL_DIFFICULTY.into(), + TERMINAL_BLOCK, + Hash256::zero(), + ); + + for i in 0..=TERMINAL_BLOCK { + if i > 0 { + generator.insert_pow_block(i).unwrap(); + } + + /* + * Generate a 
block, inspect it. + */ + + let block = generator.latest_block().unwrap(); + assert_eq!(block.block_number(), i); + + let expected_parent = i + .checked_sub(1) + .map(|i| generator.block_by_number(i).unwrap().block_hash()) + .unwrap_or_else(Hash256::zero); + assert_eq!(block.parent_hash(), expected_parent); + + assert_eq!( + block.total_difficulty().unwrap(), + (i * DIFFICULTY_INCREMENT).into() + ); + + assert_eq!(generator.block_by_hash(block.block_hash()).unwrap(), block); + assert_eq!(generator.block_by_number(i).unwrap(), block); + + /* + * Check the parent is accessible. + */ + + if let Some(prev_i) = i.checked_sub(1) { + assert_eq!( + generator.block_by_number(prev_i).unwrap(), + generator.block_by_hash(block.parent_hash()).unwrap() + ); + } + + /* + * Check the next block is inaccessible. + */ + + let next_i = i + 1; + assert!(generator.block_by_number(next_i).is_none()); + } + } +} diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs new file mode 100644 index 0000000000..131bc8ba0a --- /dev/null +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -0,0 +1,126 @@ +use super::Context; +use crate::engine_api::{http::*, ExecutePayloadResponse, ExecutePayloadResponseStatus}; +use crate::json_structures::*; +use serde::de::DeserializeOwned; +use serde_json::Value as JsonValue; +use std::sync::Arc; +use types::EthSpec; + +pub async fn handle_rpc( + body: JsonValue, + ctx: Arc>, +) -> Result { + let method = body + .get("method") + .and_then(JsonValue::as_str) + .ok_or_else(|| "missing/invalid method field".to_string())?; + + let params = body + .get("params") + .ok_or_else(|| "missing/invalid params field".to_string())?; + + match method { + ETH_SYNCING => Ok(JsonValue::Bool(false)), + ETH_GET_BLOCK_BY_NUMBER => { + let tag = params + .get(0) + .and_then(JsonValue::as_str) + .ok_or_else(|| "missing/invalid params[0] value".to_string())?; + + match tag { + "latest" => 
Ok(serde_json::to_value( + ctx.execution_block_generator + .read() + .latest_execution_block(), + ) + .unwrap()), + other => Err(format!("The tag {} is not supported", other)), + } + } + ETH_GET_BLOCK_BY_HASH => { + let hash = params + .get(0) + .and_then(JsonValue::as_str) + .ok_or_else(|| "missing/invalid params[0] value".to_string()) + .and_then(|s| { + s.parse() + .map_err(|e| format!("unable to parse hash: {:?}", e)) + })?; + + Ok(serde_json::to_value( + ctx.execution_block_generator + .read() + .execution_block_by_hash(hash), + ) + .unwrap()) + } + ENGINE_EXECUTE_PAYLOAD_V1 => { + let request: JsonExecutionPayloadV1 = get_param(params, 0)?; + + let response = if let Some(status) = *ctx.static_execute_payload_response.lock() { + match status { + ExecutePayloadResponseStatus::Valid => ExecutePayloadResponse { + status, + latest_valid_hash: Some(request.block_hash), + validation_error: None, + }, + ExecutePayloadResponseStatus::Syncing => ExecutePayloadResponse { + status, + latest_valid_hash: None, + validation_error: None, + }, + _ => unimplemented!("invalid static executePayloadResponse"), + } + } else { + ctx.execution_block_generator + .write() + .execute_payload(request.into()) + }; + + Ok(serde_json::to_value(JsonExecutePayloadV1Response::from(response)).unwrap()) + } + ENGINE_GET_PAYLOAD_V1 => { + let request: JsonPayloadIdRequest = get_param(params, 0)?; + let id = request.into(); + + let response = ctx + .execution_block_generator + .write() + .get_payload(&id) + .ok_or_else(|| format!("no payload for id {:?}", id))?; + + Ok(serde_json::to_value(JsonExecutionPayloadV1::from(response)).unwrap()) + } + ENGINE_FORKCHOICE_UPDATED_V1 => { + let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; + let payload_attributes: Option = get_param(params, 1)?; + let id = ctx + .execution_block_generator + .write() + .forkchoice_updated_v1( + forkchoice_state.into(), + payload_attributes.map(|json| json.into()), + )?; + + 
Ok(serde_json::to_value(JsonForkchoiceUpdatedV1Response { + status: JsonForkchoiceUpdatedV1ResponseStatus::Success, + payload_id: id.map(Into::into), + }) + .unwrap()) + } + other => Err(format!( + "The method {} does not exist/is not available", + other + )), + } +} + +fn get_param(params: &JsonValue, index: usize) -> Result { + params + .get(index) + .ok_or_else(|| format!("missing/invalid params[{}] value", index)) + .and_then(|param| { + serde_json::from_value(param.clone()) + .map_err(|e| format!("failed to deserialize param[{}]: {:?}", index, e)) + }) +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs new file mode 100644 index 0000000000..59345bc01f --- /dev/null +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -0,0 +1,196 @@ +use crate::{ + test_utils::{MockServer, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY}, + *, +}; +use environment::null_logger; +use sensitive_url::SensitiveUrl; +use std::sync::Arc; +use task_executor::TaskExecutor; +use types::{Address, ChainSpec, Epoch, EthSpec, Hash256, Uint256}; + +pub struct ExecutionLayerRuntime { + pub runtime: Option>, + pub _runtime_shutdown: exit_future::Signal, + pub task_executor: TaskExecutor, + pub log: Logger, +} + +impl Default for ExecutionLayerRuntime { + fn default() -> Self { + let runtime = Arc::new( + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(), + ); + let (runtime_shutdown, exit) = exit_future::signal(); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let log = null_logger().unwrap(); + let task_executor = + TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + + Self { + runtime: Some(runtime), + _runtime_shutdown: runtime_shutdown, + task_executor, + log, + } + } +} + +impl Drop for ExecutionLayerRuntime { + fn drop(&mut self) { + if let Some(runtime) = self.runtime.take() { + 
Arc::try_unwrap(runtime).unwrap().shutdown_background() + } + } +} + +pub struct MockExecutionLayer { + pub server: MockServer, + pub el: ExecutionLayer, + pub el_runtime: ExecutionLayerRuntime, + pub spec: ChainSpec, +} + +impl MockExecutionLayer { + pub fn default_params() -> Self { + Self::new( + DEFAULT_TERMINAL_DIFFICULTY.into(), + DEFAULT_TERMINAL_BLOCK, + Hash256::zero(), + Epoch::new(0), + ) + } + + pub fn new( + terminal_total_difficulty: Uint256, + terminal_block: u64, + terminal_block_hash: Hash256, + terminal_block_hash_activation_epoch: Epoch, + ) -> Self { + let el_runtime = ExecutionLayerRuntime::default(); + let handle = el_runtime.runtime.as_ref().unwrap().handle(); + + let mut spec = T::default_spec(); + spec.terminal_total_difficulty = terminal_total_difficulty; + spec.terminal_block_hash = terminal_block_hash; + spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch; + + let server = MockServer::new( + handle, + terminal_total_difficulty, + terminal_block, + terminal_block_hash, + ); + + let url = SensitiveUrl::parse(&server.url()).unwrap(); + + let el = ExecutionLayer::from_urls( + vec![url], + Some(Address::repeat_byte(42)), + el_runtime.task_executor.clone(), + el_runtime.log.clone(), + ) + .unwrap(); + + Self { + server, + el, + el_runtime, + spec, + } + } + + pub async fn produce_valid_execution_payload_on_head(self) -> Self { + let latest_execution_block = { + let block_gen = self.server.execution_block_generator(); + block_gen.latest_block().unwrap() + }; + + let parent_hash = latest_execution_block.block_hash(); + let block_number = latest_execution_block.block_number() + 1; + let timestamp = block_number; + let random = Hash256::from_low_u64_be(block_number); + let finalized_block_hash = parent_hash; + + self.el + .notify_forkchoice_updated( + parent_hash, + Hash256::zero(), + Some(PayloadAttributes { + timestamp, + random, + suggested_fee_recipient: Address::repeat_byte(42), + }), + ) + .await + .unwrap(); + + 
let payload = self + .el + .get_payload::(parent_hash, timestamp, random, finalized_block_hash) + .await + .unwrap(); + let block_hash = payload.block_hash; + assert_eq!(payload.parent_hash, parent_hash); + assert_eq!(payload.block_number, block_number); + assert_eq!(payload.timestamp, timestamp); + assert_eq!(payload.random, random); + + let (payload_response, latest_valid_hash) = + self.el.execute_payload(&payload).await.unwrap(); + assert_eq!(payload_response, ExecutePayloadResponseStatus::Valid); + assert_eq!(latest_valid_hash, Some(payload.block_hash)); + + self.el + .notify_forkchoice_updated(block_hash, Hash256::zero(), None) + .await + .unwrap(); + + let head_execution_block = { + let block_gen = self.server.execution_block_generator(); + block_gen.latest_block().unwrap() + }; + + assert_eq!(head_execution_block.block_number(), block_number); + assert_eq!(head_execution_block.block_hash(), block_hash); + assert_eq!(head_execution_block.parent_hash(), parent_hash); + + self + } + + pub fn move_to_block_prior_to_terminal_block(self) -> Self { + self.server + .execution_block_generator() + .move_to_block_prior_to_terminal_block() + .unwrap(); + self + } + + pub fn move_to_terminal_block(self) -> Self { + self.server + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + self + } + + pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self + where + U: Fn(ChainSpec, ExecutionLayer, Option) -> V, + V: Future, + { + let terminal_block_number = self + .server + .execution_block_generator() + .terminal_block_number; + let terminal_block = self + .server + .execution_block_generator() + .execution_block_by_number(terminal_block_number); + + func(self.spec.clone(), self.el.clone(), terminal_block).await; + self + } +} diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs new file mode 100644 index 0000000000..cd45d34a1f --- /dev/null +++ 
b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -0,0 +1,315 @@ +//! Provides a mock execution engine HTTP JSON-RPC API for use in testing. + +use crate::engine_api::http::JSONRPC_VERSION; +use crate::engine_api::ExecutePayloadResponseStatus; +use bytes::Bytes; +use environment::null_logger; +use execution_block_generator::{Block, PoWBlock}; +use handle_rpc::handle_rpc; +use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use slog::{info, Logger}; +use std::future::Future; +use std::marker::PhantomData; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::sync::Arc; +use tokio::{runtime, sync::oneshot}; +use types::{EthSpec, Hash256, Uint256}; +use warp::Filter; + +pub use execution_block_generator::{generate_pow_block, ExecutionBlockGenerator}; +pub use mock_execution_layer::{ExecutionLayerRuntime, MockExecutionLayer}; + +pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; +pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; + +mod execution_block_generator; +mod handle_rpc; +mod mock_execution_layer; + +pub struct MockServer { + _shutdown_tx: oneshot::Sender<()>, + listen_socket_addr: SocketAddr, + last_echo_request: Arc>>, + pub ctx: Arc>, +} + +impl MockServer { + pub fn unit_testing() -> Self { + Self::new( + &runtime::Handle::current(), + DEFAULT_TERMINAL_DIFFICULTY.into(), + DEFAULT_TERMINAL_BLOCK, + Hash256::zero(), + ) + } + + pub fn new( + handle: &runtime::Handle, + terminal_difficulty: Uint256, + terminal_block: u64, + terminal_block_hash: Hash256, + ) -> Self { + let last_echo_request = Arc::new(RwLock::new(None)); + let preloaded_responses = Arc::new(Mutex::new(vec![])); + let execution_block_generator = + ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash); + + let ctx: Arc> = Arc::new(Context { + config: <_>::default(), + log: null_logger().unwrap(), + last_echo_request: last_echo_request.clone(), + execution_block_generator: 
RwLock::new(execution_block_generator), + preloaded_responses, + static_execute_payload_response: <_>::default(), + _phantom: PhantomData, + }); + + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + + let shutdown_future = async { + // Ignore the result from the channel, shut down regardless. + let _ = shutdown_rx.await; + }; + + // The `serve` function will panic unless it's run inside a tokio runtime, so use `block_on` + // if we're not in a runtime. However, we can't *always* use `block_on` since tokio will + // panic if we try to block inside an async context. + let serve = || serve(ctx.clone(), shutdown_future).unwrap(); + let (listen_socket_addr, server_future) = if runtime::Handle::try_current().is_err() { + handle.block_on(async { serve() }) + } else { + serve() + }; + + handle.spawn(server_future); + + Self { + _shutdown_tx: shutdown_tx, + listen_socket_addr, + last_echo_request, + ctx, + } + } + + pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator> { + self.ctx.execution_block_generator.write() + } + + pub fn url(&self) -> String { + format!( + "http://{}:{}", + self.listen_socket_addr.ip(), + self.listen_socket_addr.port() + ) + } + + pub fn last_echo_request(&self) -> Bytes { + self.last_echo_request + .write() + .take() + .expect("last echo request is none") + } + + pub fn push_preloaded_response(&self, response: serde_json::Value) { + self.ctx.preloaded_responses.lock().push(response) + } + + pub fn all_payloads_valid(&self) { + *self.ctx.static_execute_payload_response.lock() = Some(ExecutePayloadResponseStatus::Valid) + } + + pub fn insert_pow_block( + &self, + block_number: u64, + block_hash: Hash256, + parent_hash: Hash256, + total_difficulty: Uint256, + ) { + let block = Block::PoW(PoWBlock { + block_number, + block_hash, + parent_hash, + total_difficulty, + }); + + self.ctx + .execution_block_generator + .write() + // The EF tests supply blocks out of order, so we must import them "without checks" and + 
// trust they form valid chains. + .insert_block_without_checks(block) + .unwrap() + } + + pub fn get_block(&self, block_hash: Hash256) -> Option> { + self.ctx + .execution_block_generator + .read() + .block_by_hash(block_hash) + } + + pub fn drop_all_blocks(&self) { + self.ctx.execution_block_generator.write().drop_all_blocks() + } +} + +#[derive(Debug)] +pub enum Error { + Warp(warp::Error), + Other(String), +} + +impl From for Error { + fn from(e: warp::Error) -> Self { + Error::Warp(e) + } +} + +impl From for Error { + fn from(e: String) -> Self { + Error::Other(e) + } +} + +#[derive(Debug)] +struct MissingIdField; + +impl warp::reject::Reject for MissingIdField {} + +/// A wrapper around all the items required to spawn the HTTP server. +/// +/// The server will gracefully handle the case where any fields are `None`. +pub struct Context { + pub config: Config, + pub log: Logger, + pub last_echo_request: Arc>>, + pub execution_block_generator: RwLock>, + pub preloaded_responses: Arc>>, + pub static_execute_payload_response: Arc>>, + pub _phantom: PhantomData, +} + +/// Configuration for the HTTP server. +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub listen_addr: Ipv4Addr, + pub listen_port: u16, +} + +impl Default for Config { + fn default() -> Self { + Self { + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 0, + } + } +} + +/// Creates a server that will serve requests using information from `ctx`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. +/// +/// ## Returns +/// +/// This function will bind the server to the provided address and then return a tuple of: +/// +/// - `SocketAddr`: the address that the HTTP server will listen on. +/// - `Future`: the actual server future that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. 
+pub fn serve( + ctx: Arc>, + shutdown: impl Future + Send + Sync + 'static, +) -> Result<(SocketAddr, impl Future), Error> { + let config = &ctx.config; + let log = ctx.log.clone(); + + let inner_ctx = ctx.clone(); + let ctx_filter = warp::any().map(move || inner_ctx.clone()); + + // `/` + // + // Handles actual JSON-RPC requests. + let root = warp::path::end() + .and(warp::body::json()) + .and(ctx_filter.clone()) + .and_then(|body: serde_json::Value, ctx: Arc>| async move { + let id = body + .get("id") + .and_then(serde_json::Value::as_u64) + .ok_or_else(|| warp::reject::custom(MissingIdField))?; + + let preloaded_response = { + let mut preloaded_responses = ctx.preloaded_responses.lock(); + if !preloaded_responses.is_empty() { + Some(preloaded_responses.remove(0)) + } else { + None + } + }; + + let response = if let Some(preloaded_response) = preloaded_response { + preloaded_response + } else { + match handle_rpc(body, ctx).await { + Ok(result) => json!({ + "id": id, + "jsonrpc": JSONRPC_VERSION, + "result": result + }), + Err(message) => json!({ + "id": id, + "jsonrpc": JSONRPC_VERSION, + "error": { + "code": -1234, // Junk error code. + "message": message + } + }), + } + }; + + Ok::<_, warp::reject::Rejection>( + warp::http::Response::builder() + .status(200) + .body(serde_json::to_string(&response).expect("response must be valid JSON")), + ) + }); + + // `/echo` + // + // Sends the body of the request to `ctx.last_echo_request` so we can inspect requests. + let echo = warp::path("echo") + .and(warp::body::bytes()) + .and(ctx_filter) + .and_then(|bytes: Bytes, ctx: Arc>| async move { + *ctx.last_echo_request.write() = Some(bytes.clone()); + Ok::<_, warp::reject::Rejection>( + warp::http::Response::builder().status(200).body(bytes), + ) + }); + + let routes = warp::post() + .and(root.or(echo)) + // Add a `Server` header. 
+ .map(|reply| warp::reply::with_header(reply, "Server", "lighthouse-mock-execution-client")); + + let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown( + SocketAddrV4::new(config.listen_addr, config.listen_port), + async { + shutdown.await; + }, + )?; + + info!( + log, + "Metrics HTTP server started"; + "listen_address" => listening_socket.to_string(), + ); + + Ok((listening_socket, server)) +} diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 337aea8b28..778e0a4ca6 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -16,9 +16,9 @@ eth1 = { path = "../eth1"} rayon = "1.4.1" state_processing = { path = "../../consensus/state_processing" } merkle_proof = { path = "../../consensus/merkle_proof" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_hashing = "0.2.0" -tree_hash = "0.4.0" +tree_hash = "0.4.1" tokio = { version = "1.14.0", features = ["full"] } slog = "2.5.2" int_to_bytes = { path = "../../consensus/int_to_bytes" } diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 8a5bbd0b16..aac13a324f 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -373,6 +373,7 @@ impl Eth1GenesisService { eth1_block.hash, eth1_block.timestamp, genesis_deposits(deposit_logs, spec)?, + None, spec, ) .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index e36c115b47..d8c25baec8 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -3,7 +3,12 @@ use eth2_hashing::hash; use rayon::prelude::*; use ssz::Encode; use state_processing::initialize_beacon_state_from_eth1; -use types::{BeaconState, ChainSpec, DepositData, EthSpec, Hash256, Keypair, PublicKey, Signature}; +use types::{ + BeaconState, ChainSpec, DepositData, EthSpec, 
ExecutionPayloadHeader, Hash256, Keypair, + PublicKey, Signature, +}; + +pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; /// Builds a genesis state as defined by the Eth2 interop procedure (see below). /// @@ -12,9 +17,10 @@ use types::{BeaconState, ChainSpec, DepositData, EthSpec, Hash256, Keypair, Publ pub fn interop_genesis_state( keypairs: &[Keypair], genesis_time: u64, + eth1_block_hash: Hash256, + execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { - let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; @@ -44,6 +50,7 @@ pub fn interop_genesis_state( eth1_block_hash, eth1_timestamp, genesis_deposits(datas, spec)?, + execution_payload_header, spec, ) .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; @@ -73,8 +80,14 @@ mod test { let keypairs = generate_deterministic_keypairs(validator_count); - let state = interop_genesis_state::(&keypairs, genesis_time, spec) - .expect("should build state"); + let state = interop_genesis_state::( + &keypairs, + genesis_time, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + spec, + ) + .expect("should build state"); assert_eq!( state.eth1_data().block_hash, diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 5390e30d9f..ccf8fe10c9 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -4,5 +4,5 @@ mod interop; pub use eth1::Config as Eth1Config; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; -pub use interop::interop_genesis_state; +pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 9afbf15972..85bdbad51f 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -24,14 +24,14 @@ lighthouse_metrics = { path = 
"../../common/lighthouse_metrics" } lazy_static = "1.4.0" warp_utils = { path = "../../common/warp_utils" } slot_clock = { path = "../../common/slot_clock" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" bs58 = "0.4.0" futures = "0.3.8" [dev-dependencies] store = { path = "../store" } environment = { path = "../../lighthouse/environment" } -tree_hash = "0.4.0" +tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } [[test]] diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 64d9b9e841..b0907a30c1 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -20,7 +20,7 @@ use beacon_chain::{ observed_operations::ObservationOutcome, validator_monitor::{get_block_delay_ms, timestamp_now}, AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - WhenSlotSkipped, + HeadSafetyStatus, WhenSlotSkipped, }; use block_id::BlockId; use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; @@ -55,7 +55,10 @@ use warp::http::StatusCode; use warp::sse::Event; use warp::Reply; use warp::{http::Response, Filter}; -use warp_utils::task::{blocking_json_task, blocking_task}; +use warp_utils::{ + query::multi_key_query, + task::{blocking_json_task, blocking_task}, +}; const API_PREFIX: &str = "eth"; @@ -97,6 +100,7 @@ pub struct Config { pub allow_origin: Option, pub serve_legacy_spec: bool, pub tls_config: Option, + pub allow_sync_stalled: bool, } impl Default for Config { @@ -108,6 +112,7 @@ impl Default for Config { allow_origin: None, serve_legacy_spec: true, tls_config: None, + allow_sync_stalled: false, } } } @@ -237,6 +242,7 @@ pub fn serve( shutdown: impl Future + Send + Sync + 'static, ) -> Result { let config = ctx.config.clone(); + let allow_sync_stalled = config.allow_sync_stalled; let log = ctx.log.clone(); // Configure CORS. @@ -337,44 +343,78 @@ pub fn serve( } }); - // Create a `warp` filter that rejects request whilst the node is syncing. 
- let not_while_syncing_filter = warp::any() - .and(network_globals.clone()) - .and(chain_filter.clone()) - .and_then( - |network_globals: Arc>, chain: Arc>| async move { - match *network_globals.sync_state.read() { - SyncState::SyncingFinalized { .. } => { - let head_slot = chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)?; + // Create a `warp` filter that rejects requests whilst the node is syncing. + let not_while_syncing_filter = + warp::any() + .and(network_globals.clone()) + .and(chain_filter.clone()) + .and_then( + move |network_globals: Arc>, + chain: Arc>| async move { + match *network_globals.sync_state.read() { + SyncState::SyncingFinalized { .. } => { + let head_slot = chain + .best_slot() + .map_err(warp_utils::reject::beacon_chain_error)?; - let current_slot = chain - .slot_clock - .now_or_genesis() - .ok_or_else(|| { - warp_utils::reject::custom_server_error( - "unable to read slot clock".to_string(), - ) - })?; + let current_slot = + chain.slot_clock.now_or_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ) + })?; - let tolerance = SYNC_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch(); + let tolerance = SYNC_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch(); - if head_slot + tolerance >= current_slot { - Ok(()) - } else { - Err(warp_utils::reject::not_synced(format!( - "head slot is {}, current slot is {}", - head_slot, current_slot - ))) + if head_slot + tolerance >= current_slot { + Ok(()) + } else { + Err(warp_utils::reject::not_synced(format!( + "head slot is {}, current slot is {}", + head_slot, current_slot + ))) + } } + SyncState::SyncingHead { .. } + | SyncState::SyncTransition + | SyncState::BackFillSyncing { .. } => Ok(()), + SyncState::Synced => Ok(()), + SyncState::Stalled if allow_sync_stalled => Ok(()), + SyncState::Stalled => Err(warp_utils::reject::not_synced( + "sync is stalled".to_string(), + )), } - SyncState::SyncingHead { .. 
} | SyncState::SyncTransition | SyncState::BackFillSyncing { .. } => Ok(()), - SyncState::Synced => Ok(()), - SyncState::Stalled => Err(warp_utils::reject::not_synced( - "sync is stalled".to_string(), - )), + }, + ) + .untuple_one(); + + // Create a `warp` filter that rejects requests unless the head has been verified by the + // execution layer. + let only_with_safe_head = warp::any() + .and(chain_filter.clone()) + .and_then(move |chain: Arc>| async move { + let status = chain.head_safety_status().map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to read head safety status: {:?}", + e + )) + })?; + match status { + HeadSafetyStatus::Safe(_) => Ok(()), + HeadSafetyStatus::Unsafe(hash) => { + Err(warp_utils::reject::custom_server_error(format!( + "optimistic head hash {:?} has not been verified by the execution layer", + hash + ))) } - }, - ) + HeadSafetyStatus::Invalid(hash) => { + Err(warp_utils::reject::custom_server_error(format!( + "the head block has an invalid payload {:?}, this may be unrecoverable", + hash + ))) + } + } + }) .untuple_one(); // Create a `warp` filter that provides access to the logger. 
@@ -468,12 +508,13 @@ pub fn serve( .clone() .and(warp::path("validator_balances")) .and(warp::path::end()) - .and(warp::query::()) + .and(multi_key_query::()) .and_then( |state_id: StateId, chain: Arc>, - query: api_types::ValidatorBalancesQuery| { + query_res: Result| { blocking_json_task(move || { + let query = query_res?; state_id .map_state(&chain, |state| { Ok(state @@ -484,7 +525,7 @@ pub fn serve( // filter by validator id(s) if provided .filter(|(index, (validator, _))| { query.id.as_ref().map_or(true, |ids| { - ids.0.iter().any(|id| match id { + ids.iter().any(|id| match id { ValidatorId::PublicKey(pubkey) => { &validator.pubkey == pubkey } @@ -511,11 +552,14 @@ pub fn serve( let get_beacon_state_validators = beacon_states_path .clone() .and(warp::path("validators")) - .and(warp::query::()) .and(warp::path::end()) + .and(multi_key_query::()) .and_then( - |state_id: StateId, chain: Arc>, query: api_types::ValidatorsQuery| { + |state_id: StateId, + chain: Arc>, + query_res: Result| { blocking_json_task(move || { + let query = query_res?; state_id .map_state(&chain, |state| { let epoch = state.current_epoch(); @@ -529,7 +573,7 @@ pub fn serve( // filter by validator id(s) if provided .filter(|(index, (validator, _))| { query.id.as_ref().map_or(true, |ids| { - ids.0.iter().any(|id| match id { + ids.iter().any(|id| match id { ValidatorId::PublicKey(pubkey) => { &validator.pubkey == pubkey } @@ -549,8 +593,8 @@ pub fn serve( let status_matches = query.status.as_ref().map_or(true, |statuses| { - statuses.0.contains(&status) - || statuses.0.contains(&status.superstatus()) + statuses.contains(&status) + || statuses.contains(&status.superstatus()) }); if status_matches { @@ -1646,7 +1690,7 @@ pub fn serve( warp_utils::reject::custom_bad_request("invalid peer id.".to_string()) })?; - if let Some(peer_info) = network_globals.peers().peer_info(&peer_id) { + if let Some(peer_info) = network_globals.peers.read().peer_info(&peer_id) { let address = if let 
Some(socket_addr) = peer_info.seen_addresses().next() { let mut addr = lighthouse_network::Multiaddr::from(socket_addr.ip()); addr.push(lighthouse_network::multiaddr::Protocol::Tcp( @@ -1684,14 +1728,17 @@ pub fn serve( .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::end()) - .and(warp::query::()) + .and(multi_key_query::()) .and(network_globals.clone()) .and_then( - |query: api_types::PeersQuery, network_globals: Arc>| { + |query_res: Result, + network_globals: Arc>| { blocking_json_task(move || { + let query = query_res?; let mut peers: Vec = Vec::new(); network_globals - .peers() + .peers + .read() .peers() .for_each(|(peer_id, peer_info)| { let address = @@ -1717,11 +1764,11 @@ pub fn serve( ); let state_matches = query.state.as_ref().map_or(true, |states| { - states.0.iter().any(|state_param| *state_param == state) + states.iter().any(|state_param| *state_param == state) }); let direction_matches = query.direction.as_ref().map_or(true, |directions| { - directions.0.iter().any(|dir_param| *dir_param == direction) + directions.iter().any(|dir_param| *dir_param == direction) }); if state_matches && direction_matches { @@ -1758,17 +1805,21 @@ pub fn serve( let mut disconnected: u64 = 0; let mut disconnecting: u64 = 0; - network_globals.peers().peers().for_each(|(_, peer_info)| { - let state = api_types::PeerState::from_peer_connection_status( - peer_info.connection_status(), - ); - match state { - api_types::PeerState::Connected => connected += 1, - api_types::PeerState::Connecting => connecting += 1, - api_types::PeerState::Disconnected => disconnected += 1, - api_types::PeerState::Disconnecting => disconnecting += 1, - } - }); + network_globals + .peers + .read() + .peers() + .for_each(|(_, peer_info)| { + let state = api_types::PeerState::from_peer_connection_status( + peer_info.connection_status(), + ); + match state { + api_types::PeerState::Connected => connected += 1, + api_types::PeerState::Connecting => connecting += 1, + 
api_types::PeerState::Disconnected => disconnected += 1, + api_types::PeerState::Disconnecting => disconnecting += 1, + } + }); Ok(api_types::GenericResponse::from(api_types::PeerCount { connected, @@ -1845,6 +1896,7 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) + .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAttestationDataQuery, chain: Arc>| { @@ -1877,6 +1929,7 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) + .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc>| { @@ -1947,6 +2000,7 @@ pub fn serve( .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) + .and(only_with_safe_head) .and(chain_filter.clone()) .and_then( |sync_committee_data: SyncContributionData, chain: Arc>| { @@ -2230,6 +2284,22 @@ pub fn serve( }) }); + // GET lighthouse/nat + let get_lighthouse_nat = warp::path("lighthouse") + .and(warp::path("nat")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from( + lighthouse_network::metrics::NAT_OPEN + .as_ref() + .map(|v| v.get()) + .unwrap_or(0) + != 0, + )) + }) + }); + // GET lighthouse/peers let get_lighthouse_peers = warp::path("lighthouse") .and(warp::path("peers")) @@ -2238,7 +2308,8 @@ pub fn serve( .and_then(|network_globals: Arc>| { blocking_json_task(move || { Ok(network_globals - .peers() + .peers + .read() .peers() .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { peer_id: peer_id.to_string(), @@ -2257,7 +2328,8 @@ pub fn serve( .and_then(|network_globals: Arc>| { blocking_json_task(move || { Ok(network_globals - .peers() + .peers + .read() .connected_peers() .map(|(peer_id, peer_info)| eth2::lighthouse::Peer { peer_id: peer_id.to_string(), @@ -2471,16 +2543,18 @@ pub fn serve( let get_events = 
eth1_v1 .and(warp::path("events")) .and(warp::path::end()) - .and(warp::query::()) + .and(multi_key_query::()) .and(chain_filter) .and_then( - |topics: api_types::EventQuery, chain: Arc>| { + |topics_res: Result, + chain: Arc>| { blocking_task(move || { + let topics = topics_res?; // for each topic subscribed spawn a new subscription - let mut receivers = Vec::with_capacity(topics.topics.0.len()); + let mut receivers = Vec::with_capacity(topics.topics.len()); if let Some(event_handler) = chain.event_handler.as_ref() { - for topic in topics.topics.0.clone() { + for topic in topics.topics { let receiver = match topic { api_types::EventTopic::Head => event_handler.subscribe_head(), api_types::EventTopic::Block => event_handler.subscribe_block(), @@ -2543,8 +2617,8 @@ pub fn serve( .or(get_beacon_state_fork.boxed()) .or(get_beacon_state_finality_checkpoints.boxed()) .or(get_beacon_state_validator_balances.boxed()) - .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_validators_id.boxed()) + .or(get_beacon_state_validators.boxed()) .or(get_beacon_state_committees.boxed()) .or(get_beacon_state_sync_committees.boxed()) .or(get_beacon_headers.boxed()) @@ -2575,6 +2649,7 @@ pub fn serve( .or(get_validator_sync_committee_contribution.boxed()) .or(get_lighthouse_health.boxed()) .or(get_lighthouse_syncing.boxed()) + .or(get_lighthouse_nat.boxed()) .or(get_lighthouse_peers.boxed()) .or(get_lighthouse_peers_connected.boxed()) .or(get_lighthouse_proto_array.boxed()) diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index dd2a40efa6..758c29a60f 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -133,6 +133,7 @@ pub async fn create_api_server( allow_origin: None, serve_legacy_spec: true, tls_config: None, + allow_sync_stalled: false, }, chain: Some(chain.clone()), network_tx: Some(network_tx), diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 
95f0871301..878af7a039 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2310,7 +2310,7 @@ impl ApiTester { self.attestations .clone() .into_iter() - .map(|attestation| EventKind::Attestation(attestation)) + .map(|attestation| EventKind::Attestation(Box::new(attestation))) .collect::>() .as_slice() ); diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs index 66c7a6a6f6..89e6a8e2d1 100644 --- a/beacon_node/http_metrics/src/lib.rs +++ b/beacon_node/http_metrics/src/lib.rs @@ -4,6 +4,7 @@ mod metrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use lighthouse_network::open_metrics_client::registry::Registry; use lighthouse_version::version_with_platform; use serde::{Deserialize, Serialize}; use slog::{crit, info, Logger}; @@ -39,6 +40,7 @@ pub struct Context { pub chain: Option>>, pub db_path: Option, pub freezer_db_path: Option, + pub gossipsub_registry: Option>, pub log: Logger, } diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index c86211f313..66c961956c 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,6 +1,7 @@ use crate::Context; use beacon_chain::BeaconChainTypes; use lighthouse_metrics::{Encoder, TextEncoder}; +use lighthouse_network::open_metrics_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; pub use lighthouse_metrics::*; @@ -51,6 +52,12 @@ pub fn gather_prometheus_metrics( encoder .encode(&lighthouse_metrics::gather(), &mut buffer) .unwrap(); + // encode gossipsub metrics also if they exist + if let Some(registry) = ctx.gossipsub_registry.as_ref() { + if let Ok(registry_locked) = registry.lock() { + let _ = encode(&mut buffer, ®istry_locked); + } + } String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) } diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs index 
633b81115f..fd8733cfe5 100644 --- a/beacon_node/http_metrics/tests/tests.rs +++ b/beacon_node/http_metrics/tests/tests.rs @@ -25,6 +25,7 @@ async fn returns_200_ok() { chain: None, db_path: None, freezer_db_path: None, + gossipsub_registry: None, log, }); diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 1ad3b436d1..31dfab271e 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,14 +5,14 @@ authors = ["Sigma Prime "] edition = "2018" [dependencies] -discv5 = { version = "0.1.0-beta.11", features = ["libp2p"] } +discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } hashset_delay = { path = "../../common/hashset_delay" } -eth2_ssz_types = "0.2.1" +eth2_ssz_types = "0.2.2" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } @@ -25,7 +25,7 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } smallvec = "1.6.1" tokio-io-timeout = "1.1.1" -lru = "0.6.0" +lru = "0.7.1" parking_lot = "0.11.0" sha2 = "0.9.1" snap = "1.0.1" @@ -37,18 +37,23 @@ rand = "0.7.3" directory = { path = "../../common/directory" } regex = "1.3.9" strum = { version = "0.21.0", features = ["derive"] } -superstruct = "0.2.0" +superstruct = "0.4.0" +open-metrics-client = "0.13.0" [dependencies.libp2p] -version = "0.41.0" +# version = "0.41.0" default-features = false -features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio"] +git = "https://github.com/libp2p/rust-libp2p" +# Latest libp2p master +rev = "17861d9cac121f7e448585a7f052d5eab4618826" +features = ["websocket", "identify", 
"mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext"] [dev-dependencies] slog-term = "2.6.0" slog-async = "2.5.0" tempfile = "3.1.0" exit-future = "0.2.0" +void = "1" [features] libp2p-websocket = [] diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 1276db5e7e..2a79961094 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -2,26 +2,25 @@ use crate::behaviour::gossipsub_scoring_parameters::{ lighthouse_gossip_thresholds, PeerScoreSettings, }; use crate::config::gossipsub_config; -use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS}; +use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent}; use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::rpc::*; -use crate::service::METADATA_FILENAME; +use crate::service::{Context as ServiceContext, METADATA_FILENAME}; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; -use crate::{ - error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, SyncStatus, TopicHash, -}; +use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use libp2p::{ core::{ connection::ConnectionId, identity::Keypair, multiaddr::Protocol as MProtocol, Multiaddr, }, gossipsub::{ + metrics::Config as GossipsubMetricsConfig, subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter}, Gossipsub as BaseGossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, @@ -34,7 +33,7 @@ use libp2p::{ }, NetworkBehaviour, PeerId, }; -use slog::{crit, debug, error, o, trace, warn}; +use slog::{crit, debug, o, trace, warn}; use ssz::Encode; use 
std::collections::HashSet; use std::fs::File; @@ -47,12 +46,15 @@ use std::{ task::{Context, Poll}, }; use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, ChainSpec, EnrForkId, EthSpec, ForkContext, + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, }; pub mod gossipsub_scoring_parameters; +/// The number of peers we target per subnet for discovery queries. +pub const TARGET_SUBNET_PEERS: usize = 6; + const MAX_IDENTIFY_ADDRESSES: usize = 10; /// Identifier of requests sent by a peer. @@ -184,14 +186,14 @@ pub struct Behaviour { impl Behaviour { pub async fn new( local_key: &Keypair, - mut config: NetworkConfig, + ctx: ServiceContext<'_>, network_globals: Arc>, log: &slog::Logger, - fork_context: Arc, - chain_spec: &ChainSpec, ) -> error::Result { let behaviour_log = log.new(o!()); + let mut config = ctx.config.clone(); + // Set up the Identify Behaviour let identify_config = if config.private { IdentifyConfig::new( @@ -217,25 +219,29 @@ impl Behaviour { .eth2() .expect("Local ENR must have a fork id"); - let possible_fork_digests = fork_context.all_fork_digests(); + let possible_fork_digests = ctx.fork_context.all_fork_digests(); let filter = MaxCountSubscriptionFilter { filter: Self::create_whitelist_filter( possible_fork_digests, - chain_spec.attestation_subnet_count, + ctx.chain_spec.attestation_subnet_count, SYNC_COMMITTEE_SUBNET_COUNT, ), max_subscribed_topics: 200, max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 }; - config.gs_config = gossipsub_config(fork_context.clone()); + config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); + + // If metrics are enabled for gossipsub build the configuration + let gossipsub_metrics = ctx + .gossipsub_registry + .map(|registry| (registry, GossipsubMetricsConfig::default())); - // Build and configure the Gossipsub behaviour let snappy_transform = 
SnappyTransform::new(config.gs_config.max_transmit_size()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( MessageAuthenticity::Anonymous, config.gs_config.clone(), - None, // No metrics for the time being + gossipsub_metrics, filter, snappy_transform, ) @@ -248,7 +254,7 @@ impl Behaviour { let thresholds = lighthouse_gossip_thresholds(); - let score_settings = PeerScoreSettings::new(chain_spec, &config.gs_config); + let score_settings = PeerScoreSettings::new(ctx.chain_spec, &config.gs_config); // Prepare scoring parameters let params = score_settings.get_peer_score_params( @@ -269,6 +275,7 @@ impl Behaviour { let peer_manager_cfg = PeerManagerCfg { discovery_enabled: !config.disable_discovery, + metrics_enabled: config.metrics_enabled, target_peer_count: config.target_peers, ..Default::default() }; @@ -276,7 +283,7 @@ impl Behaviour { Ok(Behaviour { // Sub-behaviours gossipsub, - eth2_rpc: RPC::new(fork_context.clone(), log.clone()), + eth2_rpc: RPC::new(ctx.fork_context.clone(), log.clone()), discovery, identify: Identify::new(identify_config), // Auxiliary fields @@ -289,7 +296,7 @@ impl Behaviour { network_dir: config.network_dir.clone(), log: behaviour_log, score_settings, - fork_context, + fork_context: ctx.fork_context, update_gossipsub_scores, }) } @@ -395,14 +402,15 @@ impl Behaviour { .remove(&topic); // unsubscribe from the topic - let topic: Topic = topic.into(); + let libp2p_topic: Topic = topic.clone().into(); - match self.gossipsub.unsubscribe(&topic) { + match self.gossipsub.unsubscribe(&libp2p_topic) { Err(_) => { - warn!(self.log, "Failed to unsubscribe from topic"; "topic" => %topic); + warn!(self.log, "Failed to unsubscribe from topic"; "topic" => %libp2p_topic); false } Ok(v) => { + // Inform the network debug!(self.log, "Unsubscribed to topic"; "topic" => %topic); v } @@ -457,7 +465,8 @@ impl Behaviour { } { if let Some(client) = self .network_globals - .peers() + .peers + .read() .peer_info(propagation_source) 
.map(|info| info.client().kind.as_ref()) { @@ -569,25 +578,6 @@ impl Behaviour { self.discovery.add_enr(enr); } - pub fn update_peers_sync_status(&mut self, peer_id: &PeerId, sync_status: SyncStatus) { - let status_repr = sync_status.as_str(); - match self - .network_globals - .peers_mut() - .update_sync_status(peer_id, sync_status) - { - Some(true) => { - trace!(self.log, "Peer sync status updated"; "peer_id" => %peer_id, "sync_status" => status_repr); - } - Some(false) => { - // Sync status is the same for known peer - } - None => { - error!(self.log, "Sync status update notification for unknown peer"; "peer_id" => %peer_id, "sync_status" => status_repr); - } - } - } - /// Updates a subnet value to the ENR attnets/syncnets bitfield. /// /// The `value` is `true` if a subnet is being added and false otherwise. @@ -613,7 +603,8 @@ impl Behaviour { // Extend min_ttl of connected peers on required subnets if let Some(min_ttl) = s.min_ttl { self.network_globals - .peers_mut() + .peers + .write() .extend_peers_on_subnet(&s.subnet, min_ttl); if let Subnet::SyncCommittee(sync_subnet) = s.subnet { self.peer_manager_mut() @@ -623,7 +614,8 @@ impl Behaviour { // Already have target number of peers, no need for subnet discovery let peers_on_subnet = self .network_globals - .peers() + .peers + .read() .good_peers_on_subnet(s.subnet) .count(); if peers_on_subnet >= TARGET_SUBNET_PEERS { @@ -750,6 +742,18 @@ impl Behaviour { /// Convenience function to propagate a request. fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { + // Increment metrics + match &request { + Request::Status(_) => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"]) + } + Request::BlocksByRange { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"]) + } + Request::BlocksByRoot { .. 
} => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) + } + } self.add_event(BehaviourEvent::RequestReceived { peer_id, id, @@ -773,7 +777,7 @@ impl Behaviour { .discovery .cached_enrs() .filter_map(|(peer_id, enr)| { - let peers = self.network_globals.peers(); + let peers = self.network_globals.peers.read(); if predicate(enr) && peers.should_dial(peer_id) { Some(*peer_id) } else { @@ -866,14 +870,16 @@ impl NetworkBehaviourEventProcess for Behaviour< GossipsubEvent::Subscribed { peer_id, topic } => { if let Some(subnet_id) = subnet_from_topic_hash(&topic) { self.network_globals - .peers_mut() + .peers + .write() .add_subscription(&peer_id, subnet_id); } } GossipsubEvent::Unsubscribed { peer_id, topic } => { if let Some(subnet_id) = subnet_from_topic_hash(&topic) { self.network_globals - .peers_mut() + .peers + .write() .remove_subscription(&peer_id, &subnet_id); } } @@ -884,6 +890,7 @@ impl NetworkBehaviourEventProcess for Behaviour< PeerAction::LowToleranceError, ReportSource::Gossipsub, Some(GoodbyeReason::Unknown), + "does_not_support_gossipsub", ); } } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index e18fd00aeb..4cafcf62b1 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -16,10 +16,10 @@ use std::sync::Arc; use std::time::Duration; use types::{ForkContext, ForkName}; -/// The maximum transmit size of gossip messages in bytes. -pub const GOSSIP_MAX_SIZE: usize = 1_048_576; -/// This is a constant to be used in discovery. The lower bound of the gossipsub mesh. -pub const MESH_N_LOW: usize = 6; +/// The maximum transmit size of gossip messages in bytes pre-merge. +const GOSSIP_MAX_SIZE: usize = 1_048_576; // 1M +/// The maximum transmit size of gossip messages in bytes post-merge. 
+const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M /// The cache time is set to accommodate the circulation time of an attestation. /// @@ -40,6 +40,15 @@ pub const DUPLICATE_CACHE_TIME: Duration = Duration::from_secs(33 * 12 + 1); // const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0]; const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0]; +/// The maximum size of gossip messages. +pub fn gossip_max_size(is_merge_enabled: bool) -> usize { + if is_merge_enabled { + GOSSIP_MAX_SIZE_POST_MERGE + } else { + GOSSIP_MAX_SIZE + } +} + #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] /// Network configuration for lighthouse. @@ -105,6 +114,10 @@ pub struct Config { /// runtime. pub import_all_attestations: bool, + /// A setting specifying a range of values that tune the network parameters of lighthouse. The + /// lower the value the less bandwidth used, but the slower messages will be received. + pub network_load: u8, + /// Indicates if the user has set the network to be in private mode. Currently this /// prevents sending client identifying information over identify. pub private: bool, @@ -186,6 +199,7 @@ impl Default for Config { client_version: lighthouse_version::version_with_platform(), disable_discovery: false, upnp_enabled: true, + network_load: 3, private: false, subscribe_all_subnets: false, import_all_attestations: false, @@ -196,8 +210,72 @@ impl Default for Config { } } +/// Controls sizes of gossipsub meshes to tune a Lighthouse node's bandwidth/performance. 
+pub struct NetworkLoad { + pub name: &'static str, + pub mesh_n_low: usize, + pub outbound_min: usize, + pub mesh_n: usize, + pub mesh_n_high: usize, + pub gossip_lazy: usize, + pub history_gossip: usize, +} + +impl From for NetworkLoad { + fn from(load: u8) -> NetworkLoad { + match load { + 1 => NetworkLoad { + name: "Low", + mesh_n_low: 1, + outbound_min: 1, + mesh_n: 3, + mesh_n_high: 4, + gossip_lazy: 3, + history_gossip: 12, + }, + 2 => NetworkLoad { + name: "Low", + mesh_n_low: 2, + outbound_min: 2, + mesh_n: 4, + mesh_n_high: 8, + gossip_lazy: 3, + history_gossip: 12, + }, + 3 => NetworkLoad { + name: "Average", + mesh_n_low: 3, + outbound_min: 2, + mesh_n: 5, + mesh_n_high: 10, + gossip_lazy: 3, + history_gossip: 12, + }, + 4 => NetworkLoad { + name: "Average", + mesh_n_low: 4, + outbound_min: 3, + mesh_n: 8, + mesh_n_high: 12, + gossip_lazy: 3, + history_gossip: 12, + }, + // 5 and above + _ => NetworkLoad { + name: "High", + mesh_n_low: 5, + outbound_min: 3, + mesh_n: 10, + mesh_n_high: 15, + gossip_lazy: 5, + history_gossip: 12, + }, + } + } +} + /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. 
-pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { +pub fn gossipsub_config(network_load: u8, fork_context: Arc) -> GossipsubConfig { // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(data) for content addressing let fast_gossip_message_id = @@ -209,7 +287,9 @@ pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); match fork_context.current_fork() { - ForkName::Altair => { + // according to: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#the-gossip-domain-gossipsub + // the derivation of the message-id remains the same in the merge + ForkName::Altair | ForkName::Merge => { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), @@ -229,6 +309,7 @@ pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { } } + let is_merge_enabled = fork_context.fork_exists(ForkName::Merge); let gossip_message_id = move |message: &GossipsubMessage| { MessageId::from( &Sha256::digest( @@ -236,17 +317,21 @@ pub fn gossipsub_config(fork_context: Arc) -> GossipsubConfig { )[..20], ) }; + + let load = NetworkLoad::from(network_load); + GossipsubConfigBuilder::default() - .max_transmit_size(GOSSIP_MAX_SIZE) + .max_transmit_size(gossip_max_size(is_merge_enabled)) .heartbeat_interval(Duration::from_millis(700)) - .mesh_n(8) - .mesh_n_low(MESH_N_LOW) - .mesh_n_high(12) - .gossip_lazy(6) + .mesh_n(load.mesh_n) + .mesh_n_low(load.mesh_n_low) + .mesh_outbound_min(load.outbound_min) + .mesh_n_high(load.mesh_n_high) + .gossip_lazy(load.gossip_lazy) .fanout_ttl(Duration::from_secs(60)) .history_length(12) .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large - .history_gossip(3) + .history_gossip(load.history_gossip) .validate_messages() // require validation before propagation 
.validation_mode(ValidationMode::Anonymous) .duplicate_cache_time(DUPLICATE_CACHE_TIME) diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 3f2ae759b7..1d542a7f39 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -127,7 +127,7 @@ pub fn use_or_load_enr( pub fn build_or_load_enr( local_key: Keypair, config: &NetworkConfig, - enr_fork_id: EnrForkId, + enr_fork_id: &EnrForkId, log: &slog::Logger, ) -> Result { // Build the local ENR. @@ -163,7 +163,7 @@ pub fn create_enr_builder_from_config( pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, - enr_fork_id: EnrForkId, + enr_fork_id: &EnrForkId, ) -> Result { let mut builder = create_enr_builder_from_config(config, true); diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index eeff19942f..34c29a44d1 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -7,7 +7,8 @@ pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder -use crate::{config, metrics}; +use crate::behaviour::TARGET_SUBNET_PEERS; +use crate::metrics; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ @@ -47,8 +48,6 @@ pub use subnet_predicate::subnet_predicate; /// Local ENR storage filename. pub const ENR_FILENAME: &str = "enr.dat"; -/// Target number of peers we'd like to have connected to a given long-lived subnet. -pub const TARGET_SUBNET_PEERS: usize = config::MESH_N_LOW; /// Target number of peers to search for given a grouped subnet query. const TARGET_PEERS_FOR_GROUPED_QUERY: usize = 6; /// Number of times to attempt a discovery request. 
@@ -563,7 +562,6 @@ impl Discovery { pub fn unban_peer(&mut self, peer_id: &PeerId, ip_addresses: Vec) { // first try and convert the peer_id to a node_id. if let Ok(node_id) = peer_id_to_node_id(peer_id) { - // If we could convert this peer id, remove it from the DHT and ban it from discovery. self.discv5.ban_node_remove(&node_id); } @@ -679,7 +677,8 @@ impl Discovery { // Determine if we have sufficient peers, which may make this discovery unnecessary. let peers_on_subnet = self .network_globals - .peers() + .peers + .read() .good_peers_on_subnet(subnet_query.subnet) .count(); @@ -692,7 +691,7 @@ impl Discovery { return false; } - let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet; + let target_peers = TARGET_SUBNET_PEERS.saturating_sub(peers_on_subnet); trace!(self.log, "Discovery query started for subnet"; "subnet_query" => ?subnet_query, "connected_peers_on_subnet" => peers_on_subnet, @@ -958,12 +957,24 @@ impl NetworkBehaviour for Discovery { &mut self, peer_id: Option, _handler: Self::ProtocolsHandler, - _error: &DialError, + error: &DialError, ) { if let Some(peer_id) = peer_id { - // set peer as disconnected in discovery DHT - debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); - self.disconnect_peer(&peer_id); + match error { + DialError::Banned + | DialError::LocalPeerId + | DialError::InvalidPeerId + | DialError::ConnectionIo(_) + | DialError::NoAddresses + | DialError::Transport(_) => { + // set peer as disconnected in discovery DHT + debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); + self.disconnect_peer(&peer_id); + } + DialError::ConnectionLimit(_) + | DialError::DialPeerConditionFalse(_) + | DialError::Aborted => {} + } } } @@ -1027,6 +1038,7 @@ impl NetworkBehaviour for Discovery { Discv5Event::SocketUpdated(socket) => { info!(self.log, "Address updated"; "ip" => %socket.ip(), "udp_port" => %socket.port()); metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); + metrics::check_nat(); // 
Discv5 will have updated our local ENR. We save the updated version // to disk. let enr = self.discv5.local_enr(); @@ -1084,7 +1096,7 @@ mod tests { ..Default::default() }; let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); - let enr: Enr = build_enr::(&enr_key, &config, EnrForkId::default()).unwrap(); + let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( enr, diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 733dc72ab5..0460a42c8a 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -10,12 +10,14 @@ mod config; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; -mod metrics; +pub mod metrics; pub mod peer_manager; pub mod rpc; mod service; pub mod types; +pub use config::gossip_max_size; + use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use std::str::FromStr; @@ -64,13 +66,16 @@ pub use crate::types::{ error, Enr, EnrSyncCommitteeBitfield, GossipTopic, NetworkGlobals, PubsubMessage, Subnet, SubnetDiscovery, }; + +pub use open_metrics_client; + pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; pub use libp2p; pub use libp2p::bandwidth::BandwidthSinks; -pub use libp2p::gossipsub::{MessageAcceptance, MessageId, Topic, TopicHash}; +pub use libp2p::gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash}; pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{multiaddr, Multiaddr}; pub use metrics::scrape_discovery_metrics; @@ -80,4 +85,4 @@ pub use peer_manager::{ peerdb::PeerDB, ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; -pub use 
service::{load_private_key, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 4767f287f4..1dfe0448b7 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,16 +1,19 @@ pub use lighthouse_metrics::*; lazy_static! { + pub static ref NAT_OPEN: Result = try_create_int_counter( + "nat_open", + "An estimate indicating if the local node is exposed to the internet." + ); pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( "libp2p_address_update_total", "Count of libp2p socked updated events (when our view of our IP address has changed)" ); pub static ref PEERS_CONNECTED: Result = try_create_int_gauge( - "libp2p_peer_connected_peers_total", + "libp2p_peers", "Count of libp2p peers currently connected" ); - pub static ref PEERS_CONNECTED_INTEROP: Result = - try_create_int_gauge("libp2p_peers", "Count of libp2p peers currently connected"); + pub static ref PEER_CONNECT_EVENT_COUNT: Result = try_create_int_counter( "libp2p_peer_connect_event_total", "Count of libp2p peer connect events (not the current number of connected peers)" @@ -19,6 +22,14 @@ lazy_static! { "libp2p_peer_disconnect_event_total", "Count of libp2p peer disconnect events" ); + pub static ref DISCOVERY_SENT_BYTES: Result = try_create_int_gauge( + "discovery_sent_bytes", + "The number of bytes sent in discovery" + ); + pub static ref DISCOVERY_RECV_BYTES: Result = try_create_int_gauge( + "discovery_recv_bytes", + "The number of bytes received in discovery" + ); pub static ref DISCOVERY_QUEUE: Result = try_create_int_gauge( "discovery_queue_size", "The number of discovery queries awaiting execution" @@ -31,11 +42,7 @@ lazy_static! 
{ "discovery_sessions", "The number of active discovery sessions with peers" ); - pub static ref DISCOVERY_REQS_IP: Result = try_create_float_gauge_vec( - "discovery_reqs_per_ip", - "Unsolicited discovery requests per ip per second", - &["Addresses"] - ); + pub static ref PEERS_PER_CLIENT: Result = try_create_int_gauge_vec( "libp2p_peers_per_client", "The connected peers via client implementation", @@ -57,6 +64,11 @@ lazy_static! { "RPC errors per client", &["client", "rpc_error", "direction"] ); + pub static ref TOTAL_RPC_REQUESTS: Result = try_create_int_counter_vec( + "libp2p_rpc_requests_total", + "RPC requests total", + &["type"] + ); pub static ref PEER_ACTION_EVENTS_PER_CLIENT: Result = try_create_int_counter_vec( "libp2p_peer_actions_per_client", @@ -69,26 +81,66 @@ lazy_static! { "Gossipsub messages that we did not accept, per client", &["client", "validation_result"] ); + + pub static ref PEER_SCORE_DISTRIBUTION: Result = + try_create_int_gauge_vec( + "peer_score_distribution", + "The distribution of connected peer scores", + &["position"] + ); + + pub static ref PEER_SCORE_PER_CLIENT: Result = + try_create_float_gauge_vec( + "peer_score_per_client", + "Average score per client", + &["client"] + ); + + /* + * Inbound/Outbound peers + */ + /// The number of peers that dialed us. + pub static ref NETWORK_INBOUND_PEERS: Result = + try_create_int_gauge("network_inbound_peers","The number of peers that are currently connected that have dialed us."); + + /// The number of peers that we dialed us. + pub static ref NETWORK_OUTBOUND_PEERS: Result = + try_create_int_gauge("network_outbound_peers","The number of peers that are currently connected that we dialed."); + + /* + * Peer Reporting + */ + pub static ref REPORT_PEER_MSGS: Result = try_create_int_counter_vec( + "libp2p_report_peer_msgs_total", + "Number of peer reports per msg", + &["msg"] + ); +} + +/// Checks if we consider the NAT open. +/// +/// Conditions for an open NAT: +/// 1. 
We have 1 or more SOCKET_UPDATED messages. This occurs when discovery has a majority of +/// users reporting an external port and our ENR gets updated. +/// 2. We have 0 SOCKET_UPDATED messages (can be true if the port was correct on boot), then we +/// rely on whether we have any inbound messages. If we have no socket update messages, but +/// manage to get at least one inbound peer, we are exposed correctly. +pub fn check_nat() { + // NAT is already deemed open. + if NAT_OPEN.as_ref().map(|v| v.get()).unwrap_or(0) != 0 { + return; + } + if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) == 0 + || NETWORK_INBOUND_PEERS.as_ref().map(|v| v.get()).unwrap_or(0) != 0_i64 + { + inc_counter(&NAT_OPEN); + } } pub fn scrape_discovery_metrics() { let metrics = discv5::metrics::Metrics::from(discv5::Discv5::raw_metrics()); - set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second); - set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64); - - let process_gauge_vec = |gauge: &Result, metrics: discv5::metrics::Metrics| { - if let Ok(gauge_vec) = gauge { - gauge_vec.reset(); - for (ip, value) in metrics.requests_per_ip_per_second.iter() { - if let Ok(metric) = gauge_vec.get_metric_with_label_values(&[&format!("{:?}", ip)]) - { - metric.set(*value); - } - } - } - }; - - process_gauge_vec(&DISCOVERY_REQS_IP, metrics); + set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64); + set_gauge(&DISCOVERY_RECV_BYTES, metrics.bytes_recv as i64); } diff --git a/beacon_node/lighthouse_network/src/peer_manager/config.rs b/beacon_node/lighthouse_network/src/peer_manager/config.rs index aef8f96504..6c5523de45 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/config.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/config.rs @@ -16,6 +16,8 @@ pub struct Config { /* Peer count related configurations */ /// Whether discovery is enabled. pub discovery_enabled: bool, + /// Whether metrics are enabled. 
+ pub metrics_enabled: bool, /// Target number of peers to connect to. pub target_peer_count: usize, @@ -34,6 +36,7 @@ impl Default for Config { fn default() -> Self { Config { discovery_enabled: true, + metrics_enabled: false, target_peer_count: DEFAULT_TARGET_PEERS, status_interval: DEFAULT_STATUS_INTERVAL, ping_interval_inbound: DEFAULT_PING_INTERVAL_INBOUND, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index cfad40aa89..6b8f6fff60 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,6 +1,6 @@ //! Implementation of Lighthouse's peer management system. -use crate::discovery::TARGET_SUBNET_PEERS; +use crate::behaviour::TARGET_SUBNET_PEERS; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::{error, metrics, Gossipsub}; use crate::{NetworkGlobals, PeerId}; @@ -8,13 +8,14 @@ use crate::{Subnet, SubnetDiscovery}; use discv5::Enr; use hashset_delay::HashSetDelay; use libp2p::identify::IdentifyInfo; -use peerdb::{BanOperation, BanResult, ScoreUpdateResult}; +use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use slog::{debug, error, warn}; use smallvec::SmallVec; use std::{ sync::Arc, time::{Duration, Instant}, }; +use strum::IntoEnumIterator; use types::{EthSpec, SyncSubnetId}; pub use libp2p::core::{identity::Keypair, Multiaddr}; @@ -46,7 +47,7 @@ pub const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.3; /// requiring subnet peers. More specifically, if our target peer limit is 50, and our excess peer /// limit is 55, and we are at 55 peers, the following parameter provisions a few more slots of /// dialing priority peers we need for validator duties. -pub const PRIORITY_PEER_EXCESS: f32 = 0.05; +pub const PRIORITY_PEER_EXCESS: f32 = 0.1; /// The main struct that handles peer's reputation and connection status. 
pub struct PeerManager { @@ -71,6 +72,8 @@ pub struct PeerManager { heartbeat: tokio::time::Interval, /// Keeps track of whether the discovery service is enabled or not. discovery_enabled: bool, + /// Keeps track if the current instance is reporting metrics or not. + metrics_enabled: bool, /// The logger associated with the `PeerManager`. log: slog::Logger, } @@ -111,6 +114,7 @@ impl PeerManager { ) -> error::Result { let config::Config { discovery_enabled, + metrics_enabled, target_peer_count, status_interval, ping_interval_inbound, @@ -130,6 +134,7 @@ impl PeerManager { sync_committee_subnets: Default::default(), heartbeat, discovery_enabled, + metrics_enabled, log: log.clone(), }) } @@ -143,14 +148,20 @@ impl PeerManager { /// This will send a goodbye and disconnect the peer if it is connected or dialing. pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { // Update the sync status if required - if let Some(info) = self.network_globals.peers_mut().peer_info_mut(peer_id) { + if let Some(info) = self.network_globals.peers.write().peer_info_mut(peer_id) { debug!(self.log, "Sending goodbye to peer"; "peer_id" => %peer_id, "reason" => %reason, "score" => %info.score()); if matches!(reason, GoodbyeReason::IrrelevantNetwork) { info.update_sync_status(SyncStatus::IrrelevantPeer); } } - self.report_peer(peer_id, PeerAction::Fatal, source, Some(reason)); + self.report_peer( + peer_id, + PeerAction::Fatal, + source, + Some(reason), + "goodbye_peer", + ); } /// Reports a peer for some action. 
@@ -162,11 +173,13 @@ impl PeerManager { action: PeerAction, source: ReportSource, reason: Option, + msg: &'static str, ) { let action = self .network_globals - .peers_mut() - .report_peer(peer_id, action, source); + .peers + .write() + .report_peer(peer_id, action, source, msg); self.handle_score_action(peer_id, action, reason); } @@ -263,13 +276,14 @@ impl PeerManager { if (min_ttl.is_some() && connected_or_dialing + to_dial_peers.len() < self.max_priority_peers() || connected_or_dialing + to_dial_peers.len() < self.max_peers()) - && self.network_globals.peers().should_dial(&peer_id) + && self.network_globals.peers.read().should_dial(&peer_id) { // This should be updated with the peer dialing. In fact created once the peer is // dialed if let Some(min_ttl) = min_ttl { self.network_globals - .peers_mut() + .peers + .write() .update_min_ttl(&peer_id, min_ttl); } to_dial_peers.push(peer_id); @@ -339,22 +353,27 @@ impl PeerManager { /// /// This is used to determine if we should accept incoming connections. pub fn ban_status(&self, peer_id: &PeerId) -> BanResult { - self.network_globals.peers().ban_status(peer_id) + self.network_globals.peers.read().ban_status(peer_id) } pub fn is_connected(&self, peer_id: &PeerId) -> bool { - self.network_globals.peers().is_connected(peer_id) + self.network_globals.peers.read().is_connected(peer_id) } /// Reports whether the peer limit is reached in which case we stop allowing new incoming /// connections. - pub fn peer_limit_reached(&self) -> bool { - self.network_globals.connected_or_dialing_peers() >= self.max_peers() + pub fn peer_limit_reached(&self, count_dialing: bool) -> bool { + let max_peers = self.max_peers(); + if count_dialing { + self.network_globals.connected_or_dialing_peers() >= max_peers + } else { + self.network_globals.connected_peers() >= max_peers + } } /// Updates `PeerInfo` with `identify` information. 
pub fn identify(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { - if let Some(peer_info) = self.network_globals.peers_mut().peer_info_mut(peer_id) { + if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { let previous_kind = peer_info.client().kind.clone(); let previous_listening_addresses = peer_info.set_listening_addresses(info.listen_addrs.clone()); @@ -371,19 +390,21 @@ impl PeerManager { "protocols" => ?info.protocols ); - // update the peer client kind metric - if let Some(v) = metrics::get_int_gauge( - &metrics::PEERS_PER_CLIENT, - &[&peer_info.client().kind.to_string()], + // update the peer client kind metric if the peer is connected + if matches!( + peer_info.connection_status(), + PeerConnectionStatus::Connected { .. } + | PeerConnectionStatus::Disconnecting { .. } ) { - v.inc() - }; - if let Some(v) = metrics::get_int_gauge( - &metrics::PEERS_PER_CLIENT, - &[&previous_kind.to_string()], - ) { - v.dec() - }; + metrics::inc_gauge_vec( + &metrics::PEERS_PER_CLIENT, + &[&peer_info.client().kind.to_string()], + ); + metrics::dec_gauge_vec( + &metrics::PEERS_PER_CLIENT, + &[&previous_kind.to_string()], + ); + } } } else { error!(self.log, "Received an Identify response from an unknown peer"; "peer_id" => peer_id.to_string()); @@ -401,7 +422,7 @@ impl PeerManager { direction: ConnectionDirection, ) { let client = self.network_globals.client(peer_id); - let score = self.network_globals.peers().score(peer_id); + let score = self.network_globals.peers.read().score(peer_id); debug!(self.log, "RPC Error"; "protocol" => %protocol, "err" => %err, "client" => %client, "peer_id" => %peer_id, "score" => %score, "direction" => ?direction); metrics::inc_counter_vec( @@ -497,13 +518,19 @@ impl PeerManager { RPCError::Disconnected => return, // No penalty for a graceful disconnection }; - self.report_peer(peer_id, peer_action, ReportSource::RPC, None); + self.report_peer( + peer_id, + peer_action, + ReportSource::RPC, + None, + 
"handle_rpc_error", + ); } /// A ping request has been received. // NOTE: The behaviour responds with a PONG automatically pub fn ping_request(&mut self, peer_id: &PeerId, seq: u64) { - if let Some(peer_info) = self.network_globals.peers().peer_info(peer_id) { + if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { // received a ping // reset the to-ping timer for this peer debug!(self.log, "Received a ping request"; "peer_id" => %peer_id, "seq_no" => seq); @@ -540,7 +567,7 @@ impl PeerManager { /// A PONG has been returned from a peer. pub fn pong_response(&mut self, peer_id: &PeerId, seq: u64) { - if let Some(peer_info) = self.network_globals.peers().peer_info(peer_id) { + if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { // received a pong // if the sequence number is unknown send update the meta data of the peer. @@ -563,7 +590,7 @@ impl PeerManager { /// Received a metadata response from a peer. pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData) { - if let Some(peer_info) = self.network_globals.peers_mut().peer_info_mut(peer_id) { + if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { if let Some(known_meta_data) = &peer_info.meta_data() { if *known_meta_data.seq_number() < *meta_data.seq_number() { debug!(self.log, "Updating peer's metadata"; @@ -590,7 +617,8 @@ impl PeerManager { pub(crate) fn update_gossipsub_scores(&mut self, gossipsub: &Gossipsub) { let actions = self .network_globals - .peers_mut() + .peers + .write() .update_gossipsub_scores(self.target_peers, gossipsub); for (peer_id, score_action) in actions { @@ -598,6 +626,46 @@ impl PeerManager { } } + // This function updates metrics for all connected peers. + fn update_connected_peer_metrics(&self) { + // Do nothing if we don't have metrics enabled. 
+ if !self.metrics_enabled { + return; + } + + let mut connected_peer_count = 0; + let mut inbound_connected_peers = 0; + let mut outbound_connected_peers = 0; + let mut clients_per_peer = HashMap::new(); + + for (_peer, peer_info) in self.network_globals.peers.read().connected_peers() { + connected_peer_count += 1; + if let PeerConnectionStatus::Connected { n_in, .. } = peer_info.connection_status() { + if *n_in > 0 { + inbound_connected_peers += 1; + } else { + outbound_connected_peers += 1; + } + } + *clients_per_peer + .entry(peer_info.client().kind.to_string()) + .or_default() += 1; + } + + metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peer_count); + metrics::set_gauge(&metrics::NETWORK_INBOUND_PEERS, inbound_connected_peers); + metrics::set_gauge(&metrics::NETWORK_OUTBOUND_PEERS, outbound_connected_peers); + + for client_kind in ClientKind::iter() { + let value = clients_per_peer.get(&client_kind.to_string()).unwrap_or(&0); + metrics::set_gauge_vec( + &metrics::PEERS_PER_CLIENT, + &[&client_kind.to_string()], + *value as i64, + ); + } + } + /* Internal functions */ /// Sets a peer as connected as long as their reputation allows it @@ -630,7 +698,11 @@ impl PeerManager { /// /// This is also called when dialing a peer fails. fn inject_disconnect(&mut self, peer_id: &PeerId) { - let ban_operation = self.network_globals.peers_mut().inject_disconnect(peer_id); + let (ban_operation, purged_peers) = self + .network_globals + .peers + .write() + .inject_disconnect(peer_id); if let Some(ban_operation) = ban_operation { // The peer was awaiting a ban, continue to ban the peer. @@ -641,6 +713,11 @@ impl PeerManager { self.inbound_ping_peers.remove(peer_id); self.outbound_ping_peers.remove(peer_id); self.status_peers.remove(peer_id); + self.events.extend( + purged_peers + .into_iter() + .map(|(peer_id, unbanned_ips)| PeerManagerEvent::UnBanned(peer_id, unbanned_ips)), + ); } /// Registers a peer as connected. 
The `ingoing` parameter determines if the peer is being @@ -656,7 +733,7 @@ impl PeerManager { enr: Option, ) -> bool { { - let mut peerdb = self.network_globals.peers_mut(); + let mut peerdb = self.network_globals.peers.write(); if !matches!(peerdb.ban_status(peer_id), BanResult::NotBanned) { // don't connect if the peer is banned error!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id); @@ -688,21 +765,6 @@ impl PeerManager { // increment prometheus metrics metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers); - metrics::set_gauge(&metrics::PEERS_CONNECTED_INTEROP, connected_peers); - - // Increment the PEERS_PER_CLIENT metric - if let Some(kind) = self - .network_globals - .peers() - .peer_info(peer_id) - .map(|peer_info| peer_info.client().kind.clone()) - { - if let Some(v) = - metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) - { - v.inc() - }; - } true } @@ -712,7 +774,8 @@ impl PeerManager { self.events .push(PeerManagerEvent::DisconnectPeer(peer_id, reason)); self.network_globals - .peers_mut() + .peers + .write() .notify_disconnecting(&peer_id, false); } @@ -728,7 +791,8 @@ impl PeerManager { .filter_map(|(k, v)| { if self .network_globals - .peers() + .peers + .read() .good_peers_on_subnet(Subnet::SyncCommittee(*k)) .count() < TARGET_SUBNET_PEERS @@ -777,11 +841,14 @@ impl PeerManager { } // Updates peer's scores and unban any peers if required. - let actions = self.network_globals.peers_mut().update_scores(); + let actions = self.network_globals.peers.write().update_scores(); for (peer_id, action) in actions { self.handle_score_action(&peer_id, action, None); } + // Update peer score metrics; + self.update_peer_score_metrics(); + // Maintain minimum count for sync committee peers. 
self.maintain_sync_committee_peers(); @@ -796,7 +863,8 @@ impl PeerManager { let mut n_outbound_removed = 0; for (peer_id, info) in self .network_globals - .peers() + .peers + .read() .worst_connected_peers() .iter() .filter(|(_, info)| !info.has_future_duty()) @@ -819,6 +887,75 @@ impl PeerManager { self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers); } } + + // Update metrics related to peer scoring. + fn update_peer_score_metrics(&self) { + if !self.metrics_enabled { + return; + } + // reset the gauges + let _ = metrics::PEER_SCORE_DISTRIBUTION + .as_ref() + .map(|gauge| gauge.reset()); + let _ = metrics::PEER_SCORE_PER_CLIENT + .as_ref() + .map(|gauge| gauge.reset()); + + let mut avg_score_per_client: HashMap = HashMap::with_capacity(5); + { + let peers_db_read_lock = self.network_globals.peers.read(); + let connected_peers = peers_db_read_lock.best_peers_by_status(PeerInfo::is_connected); + let total_peers = connected_peers.len(); + for (id, (_peer, peer_info)) in connected_peers.into_iter().enumerate() { + // First quartile + if id == 0 { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["1st"], + peer_info.score().score() as i64, + ); + } else if id == (total_peers * 3 / 4).saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["3/4"], + peer_info.score().score() as i64, + ); + } else if id == (total_peers / 2).saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["1/2"], + peer_info.score().score() as i64, + ); + } else if id == (total_peers / 4).saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["1/4"], + peer_info.score().score() as i64, + ); + } else if id == total_peers.saturating_sub(1) { + metrics::set_gauge_vec( + &metrics::PEER_SCORE_DISTRIBUTION, + &["last"], + peer_info.score().score() as i64, + ); + } + + let mut score_peers: &mut (f64, usize) = avg_score_per_client + .entry(peer_info.client().kind.to_string()) + 
.or_default(); + score_peers.0 += peer_info.score().score(); + score_peers.1 += 1; + } + } // read lock ended + + for (client, (score, peers)) in avg_score_per_client { + metrics::set_float_gauge_vec( + &metrics::PEER_SCORE_PER_CLIENT, + &[&client.to_string()], + score / (peers as f64), + ); + } + } } enum ConnectingType { @@ -839,9 +976,6 @@ enum ConnectingType { #[cfg(test)] mod tests { use super::*; - use crate::discovery::enr_ext::CombinedKeyExt; - use crate::rpc::methods::{MetaData, MetaDataV2}; - use discv5::enr::CombinedKey; use slog::{o, Drain}; use types::MinimalEthSpec as E; @@ -864,23 +998,7 @@ mod tests { ..Default::default() }; let log = build_log(slog::Level::Debug, false); - let globals = { - let keypair = libp2p::identity::Keypair::generate_secp256k1(); - let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap(); - let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); - NetworkGlobals::new( - enr, - 9000, - 9000, - MetaData::V2(MetaDataV2 { - seq_number: 0, - attnets: Default::default(), - syncnets: Default::default(), - }), - vec![], - &log, - ) - }; + let globals = NetworkGlobals::new_test_globals(&log); PeerManager::new(config, Arc::new(globals), &log) .await .unwrap() @@ -915,14 +1033,16 @@ mod tests { // Set the outbound-only peers to have the lowest score. 
peer_manager .network_globals - .peers_mut() + .peers + .write() .peer_info_mut(&outbound_only_peer1) .unwrap() .add_to_score(-1.0); peer_manager .network_globals - .peers_mut() + .peers + .write() .peer_info_mut(&outbound_only_peer2) .unwrap() .add_to_score(-2.0); @@ -938,11 +1058,13 @@ mod tests { assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 3); assert!(peer_manager .network_globals - .peers() + .peers + .read() .is_connected(&outbound_only_peer1)); assert!(!peer_manager .network_globals - .peers() + .peers + .read() .is_connected(&outbound_only_peer2)); peer_manager.heartbeat(); @@ -971,7 +1093,8 @@ mod tests { ); peer_manager .network_globals - .peers_mut() + .peers + .write() .peer_info_mut(&(outbound_only_peer)) .unwrap() .add_to_score(-1.0); @@ -1011,25 +1134,29 @@ mod tests { ); peer_manager .network_globals - .peers_mut() + .peers + .write() .peer_info_mut(&(inbound_only_peer1)) .unwrap() .add_to_score(-19.8); peer_manager .network_globals - .peers_mut() + .peers + .write() .peer_info_mut(&(outbound_only_peer1)) .unwrap() .add_to_score(-19.8); peer_manager .network_globals - .peers_mut() + .peers + .write() .peer_info_mut(&(inbound_only_peer1)) .unwrap() .set_gossipsub_score(-85.0); peer_manager .network_globals - .peers_mut() + .peers + .write() .peer_info_mut(&(outbound_only_peer1)) .unwrap() .set_gossipsub_score(-85.0); @@ -1067,13 +1194,15 @@ mod tests { ); peer_manager .network_globals - .peers_mut() + .peers + .write() .peer_info_mut(&(inbound_only_peer1)) .unwrap() .add_to_score(-19.9); peer_manager .network_globals - .peers_mut() + .peers + .write() .peer_info_mut(&(inbound_only_peer1)) .unwrap() .set_gossipsub_score(-85.0); diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 840d6bc584..d194deffd4 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ 
b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -111,15 +111,10 @@ impl NetworkBehaviour for PeerManager { endpoint: &ConnectedPoint, _failed_addresses: Option<&Vec>, ) { - // Log the connection - match &endpoint { - ConnectedPoint::Listener { .. } => { - debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Incoming"); - } - ConnectedPoint::Dialer { .. } => { - debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Outgoing"); - // TODO: Ensure we have that address registered. - } + debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => ?endpoint.to_endpoint()); + // Check NAT if metrics are enabled + if self.network_globals.local_enr.read().udp().is_some() { + metrics::check_nat(); } // Check to make sure the peer is not supposed to be banned @@ -142,11 +137,14 @@ impl NetworkBehaviour for PeerManager { BanResult::NotBanned => {} } + // Count dialing peers in the limit if the peer dialed us. + let count_dialing = endpoint.is_listener(); // Check the connection limits - if self.peer_limit_reached() + if self.peer_limit_reached(count_dialing) && self .network_globals - .peers() + .peers + .read() .peer_info(peer_id) .map_or(true, |peer| !peer.has_future_duty()) { @@ -155,10 +153,8 @@ impl NetworkBehaviour for PeerManager { return; } - // Register the newly connected peer (regardless if we are about to disconnect them). // NOTE: We don't register peers that we are disconnecting immediately. The network service // does not need to know about these peers. - // let enr match endpoint { ConnectedPoint::Listener { send_back_addr, .. 
} => { self.inject_connect_ingoing(peer_id, send_back_addr.clone(), None); @@ -172,19 +168,17 @@ impl NetworkBehaviour for PeerManager { } } - let connected_peers = self.network_globals.connected_peers() as i64; - // increment prometheus metrics + self.update_connected_peer_metrics(); metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); - metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers); - metrics::set_gauge(&metrics::PEERS_CONNECTED_INTEROP, connected_peers); } fn inject_disconnected(&mut self, peer_id: &PeerId) { // There are no more connections if self .network_globals - .peers() + .peers + .read() .is_connected_or_disconnecting(peer_id) { // We are disconnecting the peer or the peer has already been connected. @@ -194,20 +188,6 @@ impl NetworkBehaviour for PeerManager { self.events .push(PeerManagerEvent::PeerDisconnected(*peer_id)); debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id); - - // Decrement the PEERS_PER_CLIENT metric - if let Some(kind) = self - .network_globals - .peers() - .peer_info(peer_id) - .map(|info| info.client().kind.clone()) - { - if let Some(v) = - metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()]) - { - v.dec() - }; - } } // NOTE: It may be the case that a rejected node, due to too many peers is disconnected @@ -215,12 +195,9 @@ impl NetworkBehaviour for PeerManager { // reference so that peer manager can track this peer. 
self.inject_disconnect(peer_id); - let connected_peers = self.network_globals.connected_peers() as i64; - // Update the prometheus metrics + self.update_connected_peer_metrics(); metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); - metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peers); - metrics::set_gauge(&metrics::PEERS_CONNECTED_INTEROP, connected_peers); } fn inject_address_change( @@ -259,7 +236,7 @@ impl NetworkBehaviour for PeerManager { _error: &DialError, ) { if let Some(peer_id) = peer_id { - if !self.network_globals.peers().is_connected(&peer_id) { + if !self.network_globals.peers.read().is_connected(&peer_id) { self.inject_disconnect(&peer_id); } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index cb2816197d..cddff1218c 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -23,7 +23,7 @@ pub mod sync_status; /// Max number of disconnected nodes to remember. const MAX_DC_PEERS: usize = 500; /// The maximum number of banned nodes to remember. -const MAX_BANNED_PEERS: usize = 1000; +pub const MAX_BANNED_PEERS: usize = 1000; /// We ban an IP if there are more than `BANNED_PEERS_PER_IP_THRESHOLD` banned peers with this IP. const BANNED_PEERS_PER_IP_THRESHOLD: usize = 5; /// Relative factor of peers that are allowed to have a negative gossipsub score without penalizing @@ -314,7 +314,7 @@ impl PeerDB { .map(|(id, _)| id) } - /// Returns the peer's connection status. Returns None if the peer is not in the DB. + /// Returns the peer's connection status. Returns unknown if the peer is not in the DB. 
pub fn connection_status(&self, peer_id: &PeerId) -> Option { self.peer_info(peer_id) .map(|info| info.connection_status().clone()) @@ -490,7 +490,10 @@ impl PeerDB { peer_id: &PeerId, action: PeerAction, source: ReportSource, + msg: &'static str, ) -> ScoreUpdateResult { + metrics::inc_counter_vec(&metrics::REPORT_PEER_MSGS, &[msg]); + match self.peers.get_mut(peer_id) { Some(info) => { let previous_state = info.score_state(); @@ -502,7 +505,13 @@ impl PeerDB { let result = Self::handle_score_transition(previous_state, peer_id, info, &self.log); if previous_state == info.score_state() { - debug!(self.log, "Peer score adjusted"; "peer_id" => %peer_id, "score" => %info.score()); + debug!( + self.log, + "Peer score adjusted"; + "msg" => %msg, + "peer_id" => %peer_id, + "score" => %info.score() + ); } match result { ScoreTransitionResult::Banned => { @@ -522,44 +531,28 @@ impl PeerDB { } ScoreTransitionResult::NoAction => ScoreUpdateResult::NoAction, ScoreTransitionResult::Unbanned => { - error!(self.log, "Report peer action lead to an unbanning"; "peer_id" => %peer_id); + error!( + self.log, + "Report peer action lead to an unbanning"; + "msg" => %msg, + "peer_id" => %peer_id + ); ScoreUpdateResult::NoAction } } } None => { - debug!(self.log, "Reporting a peer that doesn't exist"; "peer_id" =>%peer_id); + debug!( + self.log, + "Reporting a peer that doesn't exist"; + "msg" => %msg, + "peer_id" =>%peer_id + ); ScoreUpdateResult::NoAction } } } - // Connection Status - - /// A peer is being dialed. - // VISIBILITY: Only the peer manager can adjust the connection state - pub(super) fn dialing_peer(&mut self, peer_id: &PeerId, enr: Option) { - let info = self.peers.entry(*peer_id).or_default(); - if let Some(enr) = enr { - info.set_enr(enr); - } - - if let Err(e) = info.dialing_peer() { - error!(self.log, "{}", e; "peer_id" => %peer_id); - } - - // If the peer was banned, remove the banned peer and addresses. 
- if info.is_banned() { - self.banned_peers_count - .remove_banned_peer(info.seen_ip_addresses()); - } - - // If the peer was disconnected, reduce the disconnected peer count. - if info.is_disconnected() { - self.disconnected_peers = self.disconnected_peers().count().saturating_sub(1); - } - } - /// Update min ttl of a peer. // VISIBILITY: Only the peer manager can update the min_ttl pub(super) fn update_min_ttl(&mut self, peer_id: &PeerId, min_ttl: Instant) { @@ -614,6 +607,12 @@ impl PeerDB { }); } + /// A peer is being dialed. + // VISIBILITY: Only the peer manager can adjust the connection state + pub(super) fn dialing_peer(&mut self, peer_id: &PeerId, enr: Option) { + self.update_connection_state(peer_id, NewConnectionState::Dialing { enr }); + } + /// Sets a peer as connected with an ingoing connection. // VISIBILITY: Only the peer manager can adjust the connection state. pub(super) fn connect_ingoing( @@ -667,7 +666,11 @@ impl PeerDB { // connection state for an unknown peer. if !matches!( new_state, - NewConnectionState::Connected { .. } | NewConnectionState::Disconnecting { .. } + NewConnectionState::Connected { .. } // We have established a new connection (peer may not have been seen before) + | NewConnectionState::Disconnecting { .. }// We are disconnecting from a peer that may not have been registered before + | NewConnectionState::Dialing { .. } // We are dialing a potentially new peer + | NewConnectionState::Disconnected { .. 
} // Dialing a peer that responds by a different ID can be immediately + // disconnected without having being stored in the db before ) { warn!(log_ref, "Updating state of unknown peer"; "peer_id" => %peer_id, "new_state" => ?new_state); @@ -689,7 +692,11 @@ impl PeerDB { // Handle all the possible state changes match (info.connection_status().clone(), new_state) { - /* Handle the transition to a connected state */ + /* CONNECTED + * + * + * Handles the transition to a connected state + */ ( current_state, NewConnectionState::Connected { @@ -709,6 +716,7 @@ impl PeerDB { } PeerConnectionStatus::Banned { .. } => { error!(self.log, "Accepted a connection from a banned peer"; "peer_id" => %peer_id); + // TODO: check if this happens and report the unban back self.banned_peers_count .remove_banned_peer(info.seen_ip_addresses()); } @@ -745,7 +753,47 @@ impl PeerDB { } } - /* Handle the transition to the disconnected state */ + /* DIALING + * + * + * Handles the transition to a dialing state + */ + (old_state, NewConnectionState::Dialing { enr }) => { + match old_state { + PeerConnectionStatus::Banned { .. } => { + warn!(self.log, "Dialing a banned peer"; "peer_id" => %peer_id); + self.banned_peers_count + .remove_banned_peer(info.seen_ip_addresses()); + } + PeerConnectionStatus::Disconnected { .. } => { + self.disconnected_peers = self.disconnected_peers.saturating_sub(1); + } + PeerConnectionStatus::Connected { .. } => { + warn!(self.log, "Dialing an already connected peer"; "peer_id" => %peer_id) + } + PeerConnectionStatus::Dialing { .. } => { + warn!(self.log, "Dialing an already dialing peer"; "peer_id" => %peer_id) + } + PeerConnectionStatus::Disconnecting { .. } => { + warn!(self.log, "Dialing a disconnecting peer"; "peer_id" => %peer_id) + } + PeerConnectionStatus::Unknown => {} // default behaviour + } + // Update the ENR if one is known. 
+ if let Some(enr) = enr { + info.set_enr(enr); + } + + if let Err(e) = info.set_dialing_peer() { + error!(self.log, "{}", e; "peer_id" => %peer_id); + } + } + + /* DISCONNECTED + * + * + * Handle the transition to the disconnected state + */ (old_state, NewConnectionState::Disconnected) => { // Remove all subnets for disconnected peers. info.clear_subnets(); @@ -765,7 +813,6 @@ impl PeerDB { .seen_ip_addresses() .filter(|ip| known_banned_ips.contains(ip)) .collect::>(); - self.shrink_to_fit(); return Some(BanOperation::ReadyToBan(banned_ips)); } PeerConnectionStatus::Disconnecting { .. } @@ -776,16 +823,28 @@ impl PeerDB { info.set_connection_status(PeerConnectionStatus::Disconnected { since: Instant::now(), }); - self.shrink_to_fit(); } } } - /* Handle the transition to the disconnecting state */ + /* DISCONNECTING + * + * + * Handles the transition to a disconnecting state + */ (PeerConnectionStatus::Banned { .. }, NewConnectionState::Disconnecting { to_ban }) => { error!(self.log, "Disconnecting from a banned peer"; "peer_id" => %peer_id); info.set_connection_status(PeerConnectionStatus::Disconnecting { to_ban }); } + ( + PeerConnectionStatus::Disconnected { .. }, + NewConnectionState::Disconnecting { to_ban }, + ) => { + // If the peer was previously disconnected and is now being disconnected, decrease + // the disconnected_peers counter. + self.disconnected_peers = self.disconnected_peers.saturating_sub(1); + info.set_connection_status(PeerConnectionStatus::Disconnecting { to_ban }); + } (_, NewConnectionState::Disconnecting { to_ban }) => { // We overwrite all states and set this peer to be disconnecting. // NOTE: A peer can be in the disconnected state and transition straight to a @@ -794,7 +853,11 @@ impl PeerDB { info.set_connection_status(PeerConnectionStatus::Disconnecting { to_ban }); } - /* Handle transitioning to the banned state */ + /* BANNED + * + * + * Handles the transition to a banned state + */ (PeerConnectionStatus::Disconnected { .. 
}, NewConnectionState::Banned) => { // It is possible to ban a peer that is currently disconnected. This can occur when // there are many events that score it poorly and are processed after it has disconnected. @@ -809,7 +872,6 @@ impl PeerDB { .seen_ip_addresses() .filter(|ip| known_banned_ips.contains(ip)) .collect::>(); - self.shrink_to_fit(); return Some(BanOperation::ReadyToBan(banned_ips)); } (PeerConnectionStatus::Disconnecting { .. }, NewConnectionState::Banned) => { @@ -850,11 +912,14 @@ impl PeerDB { .seen_ip_addresses() .filter(|ip| known_banned_ips.contains(ip)) .collect::>(); - self.shrink_to_fit(); return Some(BanOperation::ReadyToBan(banned_ips)); } - /* Handle the connection state of unbanning a peer */ + /* UNBANNED + * + * + * Handles the transition to an unbanned state + */ (old_state, NewConnectionState::Unbanned) => { if matches!(info.score_state(), ScoreState::Banned) { error!(self.log, "Unbanning a banned peer"; "peer_id" => %peer_id); @@ -874,9 +939,7 @@ impl PeerDB { // Increment the disconnected count and reduce the banned count self.banned_peers_count .remove_banned_peer(info.seen_ip_addresses()); - self.disconnected_peers = - self.disconnected_peers().count().saturating_add(1); - self.shrink_to_fit(); + self.disconnected_peers = self.disconnected_peers.saturating_add(1); } } } @@ -887,8 +950,14 @@ impl PeerDB { /// Sets the peer as disconnected. A banned peer remains banned. If the node has become banned, /// this returns true, otherwise this is false. // VISIBILITY: Only the peer manager can adjust the connection state. - pub(super) fn inject_disconnect(&mut self, peer_id: &PeerId) -> Option { - self.update_connection_state(peer_id, NewConnectionState::Disconnected) + pub(super) fn inject_disconnect( + &mut self, + peer_id: &PeerId, + ) -> (Option, Vec<(PeerId, Vec)>) { + // A peer can be banned for disconnecting. 
Thus another peer could be purged + let maybe_ban_op = self.update_connection_state(peer_id, NewConnectionState::Disconnected); + let purged_peers = self.shrink_to_fit(); + (maybe_ban_op, purged_peers) } /// The peer manager has notified us that the peer is undergoing a normal disconnect. Optionally tag @@ -899,12 +968,19 @@ impl PeerDB { } /// Removes banned and disconnected peers from the DB if we have reached any of our limits. - /// Drops the peers with the lowest reputation so that the number of - /// disconnected peers is less than MAX_DC_PEERS - fn shrink_to_fit(&mut self) { + /// Drops the peers with the lowest reputation so that the number of disconnected peers is less + /// than MAX_DC_PEERS + #[must_use = "Unbanned peers need to be reported to libp2p."] + fn shrink_to_fit(&mut self) -> Vec<(PeerId, Vec)> { + let excess_peers = self + .banned_peers_count + .banned_peers() + .saturating_sub(MAX_BANNED_PEERS); + let mut unbanned_peers = Vec::with_capacity(excess_peers); + // Remove excess banned peers while self.banned_peers_count.banned_peers() > MAX_BANNED_PEERS { - if let Some(to_drop) = if let Some((id, info, _)) = self + if let Some((to_drop, unbanned_ips)) = if let Some((id, info, _)) = self .peers .iter() .filter_map(|(id, info)| match info.connection_status() { @@ -915,7 +991,12 @@ impl PeerDB { { self.banned_peers_count .remove_banned_peer(info.seen_ip_addresses()); - Some(*id) + let unbanned_ips = info + .seen_ip_addresses() + .filter(|ip| !self.is_ip_banned(ip)) + .collect::>(); + + Some((*id, unbanned_ips)) } else { // If there is no minimum, this is a coding error. crit!( @@ -928,6 +1009,7 @@ impl PeerDB { } { debug!(self.log, "Removing old banned peer"; "peer_id" => %to_drop); self.peers.remove(&to_drop); + unbanned_peers.push((to_drop, unbanned_ips)) } } @@ -951,6 +1033,8 @@ impl PeerDB { // the count to avoid a potential infinite loop. 
self.disconnected_peers = self.disconnected_peers.saturating_sub(1); } + + unbanned_peers } /// This handles score transitions between states. It transitions peers states from @@ -1014,6 +1098,11 @@ enum NewConnectionState { /// Whether the peer should be banned after the disconnect occurs. to_ban: bool, }, + /// We are dialing this peer. + Dialing { + /// An optional known ENR for the peer we are dialing. + enr: Option, + }, /// The peer has been disconnected from our local node. Disconnected, /// The peer has been banned and actions to shift the peer to the banned state should be @@ -1331,7 +1420,7 @@ mod tests { assert_eq!(pdb.banned_peers_count.banned_peers(), 0); for p in pdb.connected_peer_ids().cloned().collect::>() { - let _ = pdb.report_peer(&p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(&p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&p); } @@ -1400,9 +1489,19 @@ mod tests { pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); - let _ = pdb.report_peer(&random_peer, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer); - let _ = pdb.report_peer(&random_peer, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); pdb.inject_disconnect(&random_peer); assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count()); @@ -1455,7 +1554,12 @@ mod tests { pdb.disconnected_peers, pdb.banned_peers_count.banned_peers ); // Disconnect and ban peer 2 - let _ = pdb.report_peer(&random_peer2, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer2, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); // Should be 1 
disconnected peer and one peer in the process of being disconnected println!( "3:{},{}", @@ -1469,7 +1573,12 @@ mod tests { pdb.disconnected_peers, pdb.banned_peers_count.banned_peers ); // Now that the peer is disconnected, register the ban. - let _ = pdb.report_peer(&random_peer2, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer2, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); // There should be 1 disconnected peer and one banned peer. println!( "5:{},{}", @@ -1483,7 +1592,12 @@ mod tests { pdb.banned_peers().count() ); // Now ban peer 1. - let _ = pdb.report_peer(&random_peer1, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer1, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); // There should be no disconnected peers and 2 banned peers println!( "6:{},{}", @@ -1497,7 +1611,12 @@ mod tests { pdb.disconnected_peers, pdb.banned_peers_count.banned_peers ); // Same thing here. - let _ = pdb.report_peer(&random_peer1, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer1, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); println!( "8:{},{}", pdb.disconnected_peers, pdb.banned_peers_count.banned_peers @@ -1533,7 +1652,12 @@ mod tests { ); // Ban peer 3 - let _ = pdb.report_peer(&random_peer3, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer3, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer3); // This should add a new banned peer, there should be 0 disconnected and 2 banned @@ -1550,7 +1674,12 @@ mod tests { ); // Ban peer 3 - let _ = pdb.report_peer(&random_peer3, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer3, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer3); // Should still have 2 banned peers @@ -1580,7 +1709,12 @@ mod tests { ); // Ban 
peer 3 - let _ = pdb.report_peer(&random_peer3, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer3, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer3); // Should have 1 disconnect (peer 2) and one banned (peer 3) @@ -1631,7 +1765,12 @@ mod tests { ); // Ban peer 0 - let _ = pdb.report_peer(&random_peer, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer( + &random_peer, + PeerAction::Fatal, + ReportSource::PeerManager, + "", + ); pdb.inject_disconnect(&random_peer); // Should have 1 disconnect ( peer 2) and two banned (peer0, peer 3) @@ -1683,7 +1822,7 @@ mod tests { let p5 = connect_peer_with_ips(&mut pdb, vec![ip5]); for p in &peers[..BANNED_PEERS_PER_IP_THRESHOLD + 1] { - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1699,6 +1838,7 @@ mod tests { &peers[BANNED_PEERS_PER_IP_THRESHOLD + 1], PeerAction::Fatal, ReportSource::PeerManager, + "", ); pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); @@ -1712,6 +1852,7 @@ mod tests { //peers[0] gets unbanned reset_score(&mut pdb, &peers[0]); pdb.update_connection_state(&peers[0], NewConnectionState::Unbanned); + let _ = pdb.shrink_to_fit(); //nothing changed assert!(pdb.ban_status(&p1).is_banned()); @@ -1723,6 +1864,7 @@ mod tests { //peers[1] gets unbanned reset_score(&mut pdb, &peers[1]); pdb.update_connection_state(&peers[1], NewConnectionState::Unbanned); + let _ = pdb.shrink_to_fit(); //all ips are unbanned assert!(!pdb.ban_status(&p1).is_banned()); @@ -1749,7 +1891,7 @@ mod tests { // ban all peers for p in &peers { - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1760,6 +1902,7 @@ mod tests { // unban a peer 
reset_score(&mut pdb, &peers[0]); pdb.update_connection_state(&peers[0], NewConnectionState::Unbanned); + let _ = pdb.shrink_to_fit(); // check not banned anymore assert!(!pdb.ban_status(&p1).is_banned()); @@ -1769,6 +1912,7 @@ mod tests { for p in &peers { reset_score(&mut pdb, p); pdb.update_connection_state(p, NewConnectionState::Unbanned); + let _ = pdb.shrink_to_fit(); } // add ip2 to all peers and ban them. @@ -1776,7 +1920,7 @@ mod tests { socker_addr.push(Protocol::Tcp(8080)); for p in &peers { pdb.connect_ingoing(p, socker_addr.clone(), None); - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1788,11 +1932,12 @@ mod tests { for p in &peers { reset_score(&mut pdb, p); pdb.update_connection_state(p, NewConnectionState::Unbanned); + let _ = pdb.shrink_to_fit(); } // reban every peer except one for p in &peers[1..] { - let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(p, PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(p); } @@ -1801,7 +1946,7 @@ mod tests { assert!(!pdb.ban_status(&p2).is_banned()); // reban last peer - let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager); + let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&peers[0]); //Ip's are banned again diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index 8f1738ac68..7cc84516a0 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -4,7 +4,7 @@ use libp2p::identify::IdentifyInfo; use serde::Serialize; -use strum::{AsRefStr, AsStaticStr}; +use strum::{AsRefStr, AsStaticStr, EnumIter}; /// Various client and protocol 
information related to a node. #[derive(Clone, Debug, Serialize)] @@ -21,7 +21,7 @@ pub struct Client { pub agent_string: Option, } -#[derive(Clone, Debug, Serialize, PartialEq, AsRefStr, AsStaticStr)] +#[derive(Clone, Debug, Serialize, PartialEq, AsRefStr, AsStaticStr, EnumIter)] pub enum ClientKind { /// A lighthouse node (the best kind). Lighthouse, diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 59f4571d8b..941ca7e6c9 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -19,8 +19,6 @@ use PeerConnectionStatus::*; #[derive(Clone, Debug, Serialize)] #[serde(bound = "T: EthSpec")] pub struct PeerInfo { - /// The connection status of the peer - _status: PeerStatus, /// The peers reputation score: Score, /// Client managing this peer @@ -57,7 +55,6 @@ pub struct PeerInfo { impl Default for PeerInfo { fn default() -> PeerInfo { PeerInfo { - _status: Default::default(), score: Score::default(), client: Client::default(), connection_status: Default::default(), @@ -236,6 +233,7 @@ impl PeerInfo { /* Mutable Functions */ /// Updates the sync status. Returns true if the status was changed. + // VISIBILITY: Both the peer manager the network sync is able to update the sync state of a peer pub fn update_sync_status(&mut self, sync_status: SyncStatus) -> bool { self.sync_status.update(sync_status) } @@ -320,7 +318,7 @@ impl PeerInfo { /// Modifies the status to Dialing /// Returns an error if the current state is unexpected. - pub(super) fn dialing_peer(&mut self) -> Result<(), &'static str> { + pub(super) fn set_dialing_peer(&mut self) -> Result<(), &'static str> { match &mut self.connection_status { Connected { .. } => return Err("Dialing connected peer"), Dialing { .. 
} => return Err("Dialing an already dialing peer"), @@ -386,21 +384,6 @@ impl PeerInfo { } } -#[derive(Clone, Debug, Serialize)] -/// The current health status of the peer. -pub enum PeerStatus { - /// The peer is healthy. - Healthy, - /// The peer is clogged. It has not been responding to requests on time. - _Clogged, -} - -impl Default for PeerStatus { - fn default() -> Self { - PeerStatus::Healthy - } -} - /// Connection Direction of connection. #[derive(Debug, Clone, Serialize, AsRefStr)] #[strum(serialize_all = "snake_case")] diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs index 4c9adeb6e5..bab8aa9aeb 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/sync_status.rs @@ -27,6 +27,19 @@ pub struct SyncInfo { pub finalized_root: Hash256, } +impl std::cmp::PartialEq for SyncStatus { + fn eq(&self, other: &Self) -> bool { + matches!( + (self, other), + (SyncStatus::Synced { .. }, SyncStatus::Synced { .. }) + | (SyncStatus::Advanced { .. }, SyncStatus::Advanced { .. }) + | (SyncStatus::Behind { .. }, SyncStatus::Behind { .. }) + | (SyncStatus::IrrelevantPeer, SyncStatus::IrrelevantPeer) + | (SyncStatus::Unknown, SyncStatus::Unknown) + ) + } +} + impl SyncStatus { /// Returns true if the peer has advanced knowledge of the chain. pub fn is_advanced(&self) -> bool { @@ -48,7 +61,7 @@ impl SyncStatus { /// E.g. returns `true` if the state changed from `Synced` to `Advanced`, but not if /// the status remained `Synced` with different `SyncInfo` within. 
pub fn update(&mut self, new_state: SyncStatus) -> bool { - let changed_status = !(self.is_same_kind(&new_state)); + let changed_status = *self != new_state; *self = new_state; changed_status } @@ -62,17 +75,6 @@ impl SyncStatus { SyncStatus::IrrelevantPeer => "Irrelevant", } } - - pub fn is_same_kind(&self, other: &Self) -> bool { - matches!( - (self, other), - (SyncStatus::Synced { .. }, SyncStatus::Synced { .. }) - | (SyncStatus::Advanced { .. }, SyncStatus::Advanced { .. }) - | (SyncStatus::Behind { .. }, SyncStatus::Behind { .. }) - | (SyncStatus::IrrelevantPeer, SyncStatus::IrrelevantPeer) - | (SyncStatus::Unknown, SyncStatus::Unknown) - ) - } } impl std::fmt::Display for SyncStatus { diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 6c6f0b9bca..0924dca0c0 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, + SignedBeaconBlockBase, SignedBeaconBlockMerge, }; use unsigned_varint::codec::Uvi; @@ -145,7 +145,7 @@ impl Decoder for SSZSnappyInboundCodec { // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of // packet size for ssz container corresponding to `self.protocol`. 
let ssz_limits = self.protocol.rpc_request_limits(); - if length > self.max_packet_size || ssz_limits.is_out_of_bounds(length) { + if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { return Err(RPCError::InvalidData); } // Calculate worst case compression length for given uncompressed length @@ -280,7 +280,7 @@ impl Decoder for SSZSnappyOutboundCodec { // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of // packet size for ssz container corresponding to `self.protocol`. let ssz_limits = self.protocol.rpc_response_limits::(); - if length > self.max_packet_size || ssz_limits.is_out_of_bounds(length) { + if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { return Err(RPCError::InvalidData); } // Calculate worst case compression length for given uncompressed length @@ -375,7 +375,7 @@ fn handle_error( } /// Returns `Some(context_bytes)` for encoding RPC responses that require context bytes. -/// Returns `None` when context bytes are not required. +/// Returns `None` when context bytes are not required. fn context_bytes( protocol: &ProtocolId, fork_context: &ForkContext, @@ -383,23 +383,24 @@ fn context_bytes( ) -> Option<[u8; CONTEXT_BYTES_LEN]> { // Add the context bytes if required if protocol.has_context_bytes() { - if let RPCCodedResponse::Success(RPCResponse::BlocksByRange(res)) = resp { - if let SignedBeaconBlock::Altair { .. } = **res { - // Altair context being `None` implies that "altair never happened". - // This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case. - return fork_context.to_context_bytes(ForkName::Altair); - } else if let SignedBeaconBlock::Base { .. } = **res { - return Some(fork_context.genesis_context_bytes()); - } - } - - if let RPCCodedResponse::Success(RPCResponse::BlocksByRoot(res)) = resp { - if let SignedBeaconBlock::Altair { .. } = **res { - // Altair context being `None` implies that "altair never happened". 
- // This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case. - return fork_context.to_context_bytes(ForkName::Altair); - } else if let SignedBeaconBlock::Base { .. } = **res { - return Some(fork_context.genesis_context_bytes()); + if let RPCCodedResponse::Success(rpc_variant) = resp { + if let RPCResponse::BlocksByRange(ref_box_block) + | RPCResponse::BlocksByRoot(ref_box_block) = rpc_variant + { + return match **ref_box_block { + // NOTE: If you are adding another fork type here, be sure to modify the + // `fork_context.to_context_bytes()` function to support it as well! + SignedBeaconBlock::Merge { .. } => { + // Merge context being `None` implies that "merge never happened". + fork_context.to_context_bytes(ForkName::Merge) + } + SignedBeaconBlock::Altair { .. } => { + // Altair context being `None` implies that "altair never happened". + // This code should be unreachable if altair is disabled since only Version::V1 would be valid in that case. + fork_context.to_context_bytes(ForkName::Altair) + } + SignedBeaconBlock::Base { .. 
} => Some(fork_context.genesis_context_bytes()), + }; } } } @@ -559,6 +560,11 @@ fn handle_v2_response( ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Box::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Box::new( + SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( + decoded_buffer, + )?), + )))), }, Protocol::BlocksByRoot => match fork_name { ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Box::new( @@ -569,6 +575,11 @@ fn handle_v2_response( ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Box::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( + decoded_buffer, + )?), + )))), }, _ => Err(RPCError::ErrorResponse( RPCResponseErrorCode::InvalidRequest, @@ -686,9 +697,9 @@ mod tests { version: Version, message: &mut BytesMut, ) -> Result>, RPCError> { - let max_packet_size = 1_048_576; let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context()); + let max_packet_size = max_rpc_size(&fork_context); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); // decode message just as snappy message @@ -1113,7 +1124,7 @@ mod tests { ); } - /// Test sending a message with encoded length prefix > MAX_RPC_SIZE. + /// Test sending a message with encoded length prefix > max_rpc_size. 
#[test] fn test_decode_invalid_length() { // 10 byte snappy stream identifier diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 1a12c26005..37724e028a 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -5,7 +5,7 @@ use super::methods::{ GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination, }; use super::outbound::OutboundRequestContainer; -use super::protocol::{InboundRequest, Protocol, RPCError, RPCProtocol}; +use super::protocol::{max_rpc_size, InboundRequest, Protocol, RPCError, RPCProtocol}; use super::{RPCReceived, RPCSend}; use crate::rpc::outbound::{OutboundFramed, OutboundRequest}; use crate::rpc::protocol::InboundFramed; @@ -951,6 +951,7 @@ where OutboundRequestContainer { req: req.clone(), fork_context: self.fork_context.clone(), + max_rpc_size: max_rpc_size(&self.fork_context), }, (), ) diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 3d386148d0..ebd6240616 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -30,7 +30,7 @@ pub use methods::{ RPCResponseErrorCode, RequestId, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS, }; pub(crate) use outbound::OutboundRequest; -pub use protocol::{Protocol, RPCError}; +pub use protocol::{max_rpc_size, Protocol, RPCError}; pub(crate) mod codec; mod handler; @@ -186,6 +186,7 @@ where SubstreamProtocol::new( RPCProtocol { fork_context: self.fork_context.clone(), + max_rpc_size: max_rpc_size(&self.fork_context), phantom: PhantomData, }, (), diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 1c908887ea..17201c6cf4 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -2,7 +2,7 @@ 
use std::marker::PhantomData; use super::methods::*; use super::protocol::Protocol; -use super::protocol::{ProtocolId, MAX_RPC_SIZE}; +use super::protocol::ProtocolId; use super::RPCError; use crate::rpc::protocol::Encoding; use crate::rpc::protocol::Version; @@ -29,6 +29,7 @@ use types::{EthSpec, ForkContext}; pub struct OutboundRequestContainer { pub req: OutboundRequest, pub fork_context: Arc, + pub max_rpc_size: usize, } #[derive(Debug, Clone, PartialEq)] @@ -150,7 +151,7 @@ where Encoding::SSZSnappy => { let ssz_snappy_codec = BaseOutboundCodec::new(SSZSnappyOutboundCodec::new( protocol, - MAX_RPC_SIZE, + self.max_rpc_size, self.fork_context.clone(), )); OutboundCodec::SSZSnappy(ssz_snappy_codec) diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index c00b9c049b..1e65041991 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,8 +21,8 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, EthSpec, ForkContext, Hash256, MainnetEthSpec, - Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EthSpec, ForkContext, + ForkName, Hash256, MainnetEthSpec, Signature, SignedBeaconBlock, }; lazy_static! { @@ -53,6 +53,18 @@ lazy_static! { ) .as_ssz_bytes() .len(); + + pub static ref SIGNED_BEACON_BLOCK_MERGE_MIN: usize = SignedBeaconBlock::::from_block( + BeaconBlock::Merge(BeaconBlockMerge::::empty(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len(); + + /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. + /// We calculate the value from its fields instead of constructing the block and checking the length. 
+ pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = types::ExecutionPayload::::max_execution_payload_size(); + pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -80,8 +92,10 @@ lazy_static! { } -/// The maximum bytes that can be sent across the RPC. -pub const MAX_RPC_SIZE: usize = 1_048_576; // 1M +/// The maximum bytes that can be sent across the RPC pre-merge. +pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M +/// The maximum bytes that can be sent across the RPC post-merge. +pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). @@ -90,6 +104,15 @@ const TTFB_TIMEOUT: u64 = 5; /// established before the stream is terminated. const REQUEST_TIMEOUT: u64 = 15; +/// Returns the maximum bytes that can be sent across the RPC. +pub fn max_rpc_size(fork_context: &ForkContext) -> usize { + if fork_context.fork_exists(ForkName::Merge) { + MAX_RPC_SIZE_POST_MERGE + } else { + MAX_RPC_SIZE + } +} + /// Protocol names to be used. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Protocol { @@ -158,6 +181,7 @@ impl std::fmt::Display for Version { #[derive(Debug, Clone)] pub struct RPCProtocol { pub fork_context: Arc, + pub max_rpc_size: usize, pub phantom: PhantomData, } @@ -194,9 +218,10 @@ impl RpcLimits { Self { min, max } } - /// Returns true if the given length is out of bounds, false otherwise. - pub fn is_out_of_bounds(&self, length: usize) -> bool { - length > self.max || length < self.min + /// Returns true if the given length is greater than `max_rpc_size` or out of + /// bounds for the given ssz type, returns false otherwise. 
+ pub fn is_out_of_bounds(&self, length: usize, max_rpc_size: usize) -> bool { + length > std::cmp::min(self.max, max_rpc_size) || length < self.min } } @@ -253,12 +278,18 @@ impl ProtocolId { Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response Protocol::BlocksByRange => RpcLimits::new( std::cmp::min( - *SIGNED_BEACON_BLOCK_ALTAIR_MIN, - *SIGNED_BEACON_BLOCK_BASE_MIN, + std::cmp::min( + *SIGNED_BEACON_BLOCK_ALTAIR_MIN, + *SIGNED_BEACON_BLOCK_BASE_MIN, + ), + *SIGNED_BEACON_BLOCK_MERGE_MIN, ), std::cmp::max( - *SIGNED_BEACON_BLOCK_ALTAIR_MAX, - *SIGNED_BEACON_BLOCK_BASE_MAX, + std::cmp::max( + *SIGNED_BEACON_BLOCK_ALTAIR_MAX, + *SIGNED_BEACON_BLOCK_BASE_MAX, + ), + *SIGNED_BEACON_BLOCK_MERGE_MAX, ), ), Protocol::BlocksByRoot => RpcLimits::new( @@ -346,7 +377,7 @@ where Encoding::SSZSnappy => { let ssz_snappy_codec = BaseInboundCodec::new(SSZSnappyInboundCodec::new( protocol, - MAX_RPC_SIZE, + self.max_rpc_size, self.fork_context.clone(), )); InboundCodec::SSZSnappy(ssz_snappy_codec) diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs index 3ecd32f3d9..0ccdd28fdf 100644 --- a/beacon_node/lighthouse_network/src/service.rs +++ b/beacon_node/lighthouse_network/src/service.rs @@ -1,6 +1,7 @@ use crate::behaviour::{ save_metadata_to_disk, Behaviour, BehaviourEvent, PeerRequestId, Request, Response, }; +use crate::config::NetworkLoad; use crate::discovery::enr; use crate::multiaddr::Protocol; use crate::rpc::{ @@ -20,6 +21,7 @@ use libp2p::{ swarm::{SwarmBuilder, SwarmEvent}, PeerId, Swarm, Transport, }; +use open_metrics_client::registry::Registry; use slog::{crit, debug, info, o, trace, warn, Logger}; use ssz::Decode; use std::fs::File; @@ -62,27 +64,34 @@ pub struct Service { pub log: Logger, } +pub struct Context<'a> { + pub config: &'a NetworkConfig, + pub enr_fork_id: EnrForkId, + pub fork_context: Arc, + pub chain_spec: &'a ChainSpec, + pub gossipsub_registry: Option<&'a mut Registry>, 
+} + impl Service { pub async fn new( executor: task_executor::TaskExecutor, - config: &NetworkConfig, - enr_fork_id: EnrForkId, + ctx: Context<'_>, log: &Logger, - fork_context: Arc, - chain_spec: &ChainSpec, ) -> error::Result<(Arc>, Self)> { let log = log.new(o!("service"=> "libp2p")); trace!(log, "Libp2p Service starting"); + let config = ctx.config; // initialise the node's ID let local_keypair = load_private_key(config, &log); // Create an ENR or load from disk if appropriate let enr = - enr::build_or_load_enr::(local_keypair.clone(), config, enr_fork_id, &log)?; + enr::build_or_load_enr::(local_keypair.clone(), config, &ctx.enr_fork_id, &log)?; let local_peer_id = enr.peer_id(); + // Construct the metadata let meta_data = load_or_build_metadata(&config.network_dir, &log); // set up a collection of variables accessible outside of the network crate @@ -99,7 +108,7 @@ impl Service { &log, )); - info!(log, "Libp2p Service"; "peer_id" => %enr.peer_id()); + info!(log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); let discovery_string = if config.disable_discovery { "None".into() } else { @@ -113,15 +122,8 @@ impl Service { .map_err(|e| format!("Failed to build transport: {:?}", e))?; // Lighthouse network behaviour - let behaviour = Behaviour::new( - &local_keypair, - config.clone(), - network_globals.clone(), - &log, - fork_context, - chain_spec, - ) - .await?; + let behaviour = + Behaviour::new(&local_keypair, ctx, network_globals.clone(), &log).await?; // use the executor for libp2p struct Executor(task_executor::TaskExecutor); @@ -215,7 +217,8 @@ impl Service { } if !network_globals - .peers() + .peers + .read() .is_connected_or_dialing(&bootnode_enr.peer_id()) { dial(multiaddr.clone()); @@ -278,11 +281,17 @@ impl Service { } /// Report a peer's action. 
- pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction, source: ReportSource) { + pub fn report_peer( + &mut self, + peer_id: &PeerId, + action: PeerAction, + source: ReportSource, + msg: &'static str, + ) { self.swarm .behaviour_mut() .peer_manager_mut() - .report_peer(peer_id, action, source, None); + .report_peer(peer_id, action, source, None, msg); } /// Disconnect and ban a peer, providing a reason. diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index db00cf3c03..aadd13a236 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -1,6 +1,6 @@ //! A collection of variables that are accessible outside of the network thread itself. use crate::peer_manager::peerdb::PeerDB; -use crate::rpc::MetaData; +use crate::rpc::{MetaData, MetaDataV2}; use crate::types::{BackFillState, SyncState}; use crate::Client; use crate::EnrExt; @@ -22,7 +22,7 @@ pub struct NetworkGlobals { /// The UDP port that the discovery service is listening on pub listen_port_udp: AtomicU16, /// The collection of known peers. - peers: RwLock>, + pub peers: RwLock>, // The local meta data of our node. pub local_metadata: RwLock>, /// The current gossipsub topic subscriptions. @@ -121,18 +121,31 @@ impl NetworkGlobals { .unwrap_or_default() } - pub fn peers(&self) -> impl std::ops::Deref> + '_ { - self.peers.read() - } - - pub(crate) fn peers_mut(&self) -> impl std::ops::DerefMut> + '_ { - self.peers.write() - } - /// Updates the syncing state of the node. /// /// The old state is returned pub fn set_sync_state(&self, new_state: SyncState) -> SyncState { std::mem::replace(&mut *self.sync_state.write(), new_state) } + + /// TESTING ONLY. Build a dummy NetworkGlobals instance. 
+ pub fn new_test_globals(log: &slog::Logger) -> NetworkGlobals { + use crate::CombinedKeyExt; + let keypair = libp2p::identity::Keypair::generate_secp256k1(); + let enr_key: discv5::enr::CombinedKey = + discv5::enr::CombinedKey::from_libp2p(&keypair).unwrap(); + let enr = discv5::enr::EnrBuilder::new("v4").build(&enr_key).unwrap(); + NetworkGlobals::new( + enr, + 9000, + 9000, + MetaData::V2(MetaDataV2 { + seq_number: 0, + attnets: Default::default(), + syncnets: Default::default(), + }), + vec![], + log, + ) + } } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 75ef6e8ab2..af2656a275 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -10,7 +10,8 @@ use std::io::{Error, ErrorKind}; use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlockMerge, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -161,6 +162,10 @@ impl PubsubMessage { SignedBeaconBlockAltair::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), + Some(ForkName::Merge) => SignedBeaconBlock::::Merge( + SignedBeaconBlockMerge::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", diff --git a/beacon_node/lighthouse_network/tests/common/behaviour.rs b/beacon_node/lighthouse_network/tests/common/behaviour.rs new file mode 100644 index 0000000000..ab4ae901f2 --- /dev/null +++ b/beacon_node/lighthouse_network/tests/common/behaviour.rs @@ -0,0 +1,349 @@ +// NOTE: Taken from libp2p's swarm's testing utils. 
+// +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::collections::HashMap; +use std::task::{Context, Poll}; + +use libp2p::core::connection::{ConnectedPoint, ConnectionId, ListenerId}; +use libp2p::swarm::protocols_handler::{ + DummyProtocolsHandler, IntoProtocolsHandler, ProtocolsHandler, +}; +use libp2p::swarm::{DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use libp2p::{Multiaddr, PeerId}; + +/// A `MockBehaviour` is a `NetworkBehaviour` that allows for +/// the instrumentation of return values, without keeping +/// any further state. +pub struct MockBehaviour< + THandler = DummyProtocolsHandler, + TOutEvent = ::OutEvent, +> where + THandler: ProtocolsHandler, +{ + /// The prototype protocols handler that is cloned for every + /// invocation of `new_handler`. + pub handler_proto: THandler, + /// The addresses to return from `addresses_of_peer`. 
+ pub addresses: HashMap>, + /// The next action to return from `poll`. + /// + /// An action is only returned once. + pub next_action: Option>, +} + +impl MockBehaviour +where + THandler: ProtocolsHandler, +{ + pub fn new(handler_proto: THandler) -> Self { + MockBehaviour { + handler_proto, + addresses: HashMap::new(), + next_action: None, + } + } +} + +impl NetworkBehaviour for MockBehaviour +where + THandler: ProtocolsHandler + Clone, + THandler::OutEvent: Clone, + TOutEvent: Send + 'static, +{ + type ProtocolsHandler = THandler; + type OutEvent = TOutEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + self.handler_proto.clone() + } + + fn addresses_of_peer(&mut self, p: &PeerId) -> Vec { + self.addresses.get(p).map_or(Vec::new(), |v| v.clone()) + } + + fn inject_event(&mut self, _: PeerId, _: ConnectionId, _: THandler::OutEvent) {} + + fn poll( + &mut self, + _: &mut Context, + _: &mut impl PollParameters, + ) -> Poll> { + Option::take(&mut self.next_action).map_or(Poll::Pending, Poll::Ready) + } +} + +/// A `CallTraceBehaviour` is a `NetworkBehaviour` that tracks invocations of callback methods and +/// their arguments, wrapping around an inner behaviour. It ensures certain invariants are met. 
+pub struct CallTraceBehaviour +where + TInner: NetworkBehaviour, +{ + inner: TInner, + + pub addresses_of_peer: Vec, + pub inject_connected: Vec, + pub inject_disconnected: Vec, + pub inject_connection_established: Vec<(PeerId, ConnectionId, ConnectedPoint)>, + pub inject_connection_closed: Vec<(PeerId, ConnectionId, ConnectedPoint)>, + pub inject_event: Vec<( + PeerId, + ConnectionId, + <::Handler as ProtocolsHandler>::OutEvent, + )>, + pub inject_dial_failure: Vec>, + pub inject_new_listener: Vec, + pub inject_new_listen_addr: Vec<(ListenerId, Multiaddr)>, + pub inject_new_external_addr: Vec, + pub inject_expired_listen_addr: Vec<(ListenerId, Multiaddr)>, + pub inject_expired_external_addr: Vec, + pub inject_listener_error: Vec, + pub inject_listener_closed: Vec<(ListenerId, bool)>, + pub poll: usize, +} + +impl CallTraceBehaviour +where + TInner: NetworkBehaviour, +{ + pub fn new(inner: TInner) -> Self { + Self { + inner, + addresses_of_peer: Vec::new(), + inject_connected: Vec::new(), + inject_disconnected: Vec::new(), + inject_connection_established: Vec::new(), + inject_connection_closed: Vec::new(), + inject_event: Vec::new(), + inject_dial_failure: Vec::new(), + inject_new_listener: Vec::new(), + inject_new_listen_addr: Vec::new(), + inject_new_external_addr: Vec::new(), + inject_expired_listen_addr: Vec::new(), + inject_expired_external_addr: Vec::new(), + inject_listener_error: Vec::new(), + inject_listener_closed: Vec::new(), + poll: 0, + } + } + + #[allow(dead_code)] + pub fn reset(&mut self) { + self.addresses_of_peer = Vec::new(); + self.inject_connected = Vec::new(); + self.inject_disconnected = Vec::new(); + self.inject_connection_established = Vec::new(); + self.inject_connection_closed = Vec::new(); + self.inject_event = Vec::new(); + self.inject_dial_failure = Vec::new(); + self.inject_new_listen_addr = Vec::new(); + self.inject_new_external_addr = Vec::new(); + self.inject_expired_listen_addr = Vec::new(); + self.inject_listener_error = 
Vec::new(); + self.inject_listener_closed = Vec::new(); + self.poll = 0; + } + + pub fn inner(&mut self) -> &mut TInner { + &mut self.inner + } + + /// Checks that when the expected number of closed connection notifications are received, a + /// given number of expected disconnections have been received as well. + /// + /// Returns if the first condition is met. + pub fn assert_disconnected( + &self, + expected_closed_connections: usize, + expected_disconnections: usize, + ) -> bool { + if self.inject_connection_closed.len() == expected_closed_connections { + assert_eq!(self.inject_disconnected.len(), expected_disconnections); + return true; + } + + false + } + + /// Checks that when the expected number of established connection notifications are received, + /// a given number of expected connections have been received as well. + /// + /// Returns if the first condition is met. + pub fn assert_connected( + &self, + expected_established_connections: usize, + expected_connections: usize, + ) -> bool { + if self.inject_connection_established.len() == expected_established_connections { + assert_eq!(self.inject_connected.len(), expected_connections); + return true; + } + + false + } +} + +impl NetworkBehaviour for CallTraceBehaviour +where + TInner: NetworkBehaviour, + <::Handler as ProtocolsHandler>::OutEvent: + Clone, +{ + type ProtocolsHandler = TInner::ProtocolsHandler; + type OutEvent = TInner::OutEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + self.inner.new_handler() + } + + fn addresses_of_peer(&mut self, p: &PeerId) -> Vec { + self.addresses_of_peer.push(*p); + self.inner.addresses_of_peer(p) + } + + fn inject_connected(&mut self, peer: &PeerId) { + assert!( + self.inject_connection_established + .iter() + .any(|(peer_id, _, _)| peer_id == peer), + "`inject_connected` is called after at least one `inject_connection_established`." 
+ ); + self.inject_connected.push(*peer); + self.inner.inject_connected(peer); + } + + fn inject_connection_established( + &mut self, + p: &PeerId, + c: &ConnectionId, + e: &ConnectedPoint, + errors: Option<&Vec>, + ) { + self.inject_connection_established.push((*p, *c, e.clone())); + self.inner.inject_connection_established(p, c, e, errors); + } + + fn inject_disconnected(&mut self, peer: &PeerId) { + assert!( + self.inject_connection_closed + .iter() + .any(|(peer_id, _, _)| peer_id == peer), + "`inject_disconnected` is called after at least one `inject_connection_closed`." + ); + self.inject_disconnected.push(*peer); + self.inner.inject_disconnected(peer); + } + + fn inject_connection_closed( + &mut self, + p: &PeerId, + c: &ConnectionId, + e: &ConnectedPoint, + handler: ::Handler, + ) { + let connection = (*p, *c, e.clone()); + assert!( + self.inject_connection_established.contains(&connection), + "`inject_connection_closed` is called only for connections for \ + which `inject_connection_established` was called first." + ); + self.inject_connection_closed.push(connection); + self.inner.inject_connection_closed(p, c, e, handler); + } + + fn inject_event( + &mut self, + p: PeerId, + c: ConnectionId, + e: <::Handler as ProtocolsHandler>::OutEvent, + ) { + assert!( + self.inject_connection_established + .iter() + .any(|(peer_id, conn_id, _)| *peer_id == p && c == *conn_id), + "`inject_event` is called for reported connections." + ); + assert!( + !self + .inject_connection_closed + .iter() + .any(|(peer_id, conn_id, _)| *peer_id == p && c == *conn_id), + "`inject_event` is never called for closed connections." 
+ ); + + self.inject_event.push((p, c, e.clone())); + self.inner.inject_event(p, c, e); + } + + fn inject_dial_failure( + &mut self, + p: Option, + handler: Self::ProtocolsHandler, + error: &DialError, + ) { + self.inject_dial_failure.push(p); + self.inner.inject_dial_failure(p, handler, error); + } + + fn inject_new_listener(&mut self, id: ListenerId) { + self.inject_new_listener.push(id); + self.inner.inject_new_listener(id); + } + + fn inject_new_listen_addr(&mut self, id: ListenerId, a: &Multiaddr) { + self.inject_new_listen_addr.push((id, a.clone())); + self.inner.inject_new_listen_addr(id, a); + } + + fn inject_expired_listen_addr(&mut self, id: ListenerId, a: &Multiaddr) { + self.inject_expired_listen_addr.push((id, a.clone())); + self.inner.inject_expired_listen_addr(id, a); + } + + fn inject_new_external_addr(&mut self, a: &Multiaddr) { + self.inject_new_external_addr.push(a.clone()); + self.inner.inject_new_external_addr(a); + } + + fn inject_expired_external_addr(&mut self, a: &Multiaddr) { + self.inject_expired_external_addr.push(a.clone()); + self.inner.inject_expired_external_addr(a); + } + + fn inject_listener_error(&mut self, l: ListenerId, e: &(dyn std::error::Error + 'static)) { + self.inject_listener_error.push(l); + self.inner.inject_listener_error(l, e); + } + + fn inject_listener_closed(&mut self, l: ListenerId, r: Result<(), &std::io::Error>) { + self.inject_listener_closed.push((l, r.is_ok())); + self.inner.inject_listener_closed(l, r); + } + + fn poll( + &mut self, + cx: &mut Context, + args: &mut impl PollParameters, + ) -> Poll> { + self.poll += 1; + self.inner.poll(cx, args) + } +} diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index 3d3a4d5778..7deb2108b0 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -13,15 +13,23 @@ use std::time::Duration; use tokio::runtime::Runtime; use 
types::{ChainSpec, EnrForkId, EthSpec, ForkContext, Hash256, MinimalEthSpec}; +#[allow(clippy::type_complexity)] +#[allow(unused)] +pub mod behaviour; +#[allow(clippy::type_complexity)] +#[allow(unused)] +pub mod swarm; + type E = MinimalEthSpec; use tempfile::Builder as TempBuilder; /// Returns a dummy fork context -fn fork_context() -> ForkContext { +pub fn fork_context() -> ForkContext { let mut chain_spec = E::default_spec(); // Set fork_epoch to `Some` to ensure that the `ForkContext` object // includes altair in the list of forks chain_spec.altair_fork_epoch = Some(types::Epoch::new(42)); + chain_spec.bellatrix_fork_epoch = Some(types::Epoch::new(84)); ForkContext::new::(types::Slot::new(0), Hash256::zero(), &chain_spec) } @@ -40,6 +48,7 @@ impl std::ops::DerefMut for Libp2pInstance { } } +#[allow(unused)] pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::FullFormat::new(decorator).build().fuse(); @@ -119,19 +128,18 @@ pub async fn build_libp2p_instance( let (signal, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx); - let fork_context = Arc::new(fork_context()); + let libp2p_context = lighthouse_network::Context { + config: &config, + enr_fork_id: EnrForkId::default(), + fork_context: Arc::new(fork_context()), + chain_spec: &ChainSpec::minimal(), + gossipsub_registry: None, + }; Libp2pInstance( - LibP2PService::new( - executor, - &config, - EnrForkId::default(), - &log, - fork_context, - &ChainSpec::minimal(), - ) - .await - .expect("should build libp2p instance") - .1, + LibP2PService::new(executor, libp2p_context, &log) + .await + .expect("should build libp2p instance") + .1, signal, ) } diff --git a/beacon_node/lighthouse_network/tests/common/swarm.rs b/beacon_node/lighthouse_network/tests/common/swarm.rs new file mode 100644 index 
0000000000..2930e2e4da --- /dev/null +++ b/beacon_node/lighthouse_network/tests/common/swarm.rs @@ -0,0 +1,99 @@ +use std::collections::HashMap; +use std::pin::Pin; + +use super::behaviour::{CallTraceBehaviour, MockBehaviour}; + +use futures::stream::Stream; +use futures::task::{Context, Poll}; +use libp2p::swarm::protocols_handler::ProtocolsHandler; +use libp2p::swarm::{IntoProtocolsHandler, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::{PeerId, Transport}; + +use futures::StreamExt; + +pub fn new_test_swarm(behaviour: B) -> Swarm +where + B: NetworkBehaviour, +{ + let id_keys = libp2p::identity::Keypair::generate_ed25519(); + let local_public_key = id_keys.public(); + let transport = libp2p::core::transport::MemoryTransport::default() + .upgrade(libp2p::core::upgrade::Version::V1) + .authenticate(libp2p::plaintext::PlainText2Config { + local_public_key: local_public_key.clone(), + }) + .multiplex(libp2p::yamux::YamuxConfig::default()) + .boxed(); + SwarmBuilder::new(transport, behaviour, local_public_key.into()).build() +} + +pub fn random_multiaddr() -> libp2p::multiaddr::Multiaddr { + libp2p::multiaddr::Protocol::Memory(rand::random::()).into() +} + +/// Bind a memory multiaddr to a compatible swarm. 
+pub async fn bind_listener( + swarm: &mut Swarm, +) -> libp2p::multiaddr::Multiaddr { + swarm.listen_on(random_multiaddr()).unwrap(); + match swarm.select_next_some().await { + SwarmEvent::NewListenAddr { + listener_id: _, + address, + } => address, + _ => panic!("Testing swarm's first event should be a new listener"), + } +} + +#[derive(Default)] +pub struct SwarmPool { + swarms: HashMap>, +} + +impl SwarmPool { + pub fn with_capacity(capacity: usize) -> Self { + Self { + swarms: HashMap::with_capacity(capacity), + } + } + pub fn insert(&mut self, swarm: Swarm) -> PeerId { + let peer_id = *swarm.local_peer_id(); + self.swarms.insert(peer_id, swarm); + peer_id + } + + pub fn remove(&mut self, peer_id: &PeerId) { + self.swarms.remove(peer_id); + } + + pub fn get_mut(&mut self, peer_id: &PeerId) -> Option<&mut Swarm> { + self.swarms.get_mut(peer_id) + } + + pub fn swarms(&self) -> &HashMap> { + &self.swarms + } + + pub fn swarms_mut(&mut self) -> &mut HashMap> { + &mut self.swarms + } +} + +impl Stream for SwarmPool +where + B: NetworkBehaviour, + ::ProtocolsHandler: ProtocolsHandler, +{ + type Item = (PeerId, + SwarmEvent<::OutEvent, <<::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::Error>); + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut polls = self + .get_mut() + .swarms + .iter_mut() + .map(|(&peer_id, swarm)| swarm.map(move |ev| (peer_id, ev))) + .collect::>(); + polls.poll_next_unpin(cx) + } +} diff --git a/beacon_node/lighthouse_network/tests/pm_tests.rs b/beacon_node/lighthouse_network/tests/pm_tests.rs new file mode 100644 index 0000000000..9b26e4939f --- /dev/null +++ b/beacon_node/lighthouse_network/tests/pm_tests.rs @@ -0,0 +1,205 @@ +#![cfg(not(debug_assertions))] + +mod common; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use common::{ + behaviour::{CallTraceBehaviour, MockBehaviour}, + swarm, +}; +use lighthouse_network::{ + peer_manager::{config::Config, 
PeerManagerEvent}, + NetworkGlobals, PeerAction, PeerInfo, PeerManager, ReportSource, +}; +use types::MinimalEthSpec as E; + +use futures::StreamExt; +use libp2p::{ + core::either::EitherError, + swarm::SwarmEvent, + swarm::{protocols_handler::DummyProtocolsHandler, DummyBehaviour, KeepAlive, Swarm}, + NetworkBehaviour, +}; + +use slog::debug; + +/// Struct that mimics the lighthouse_network::Service with respect to handling peer manager +/// events. +// TODO: make this a real struct for more accurate testing. +struct Service { + swarm: Swarm, +} + +impl Service { + async fn select_next_some(&mut self) -> SwarmEvent> { + let ev = self.swarm.select_next_some().await; + match &ev { + SwarmEvent::Behaviour(Ev(PeerManagerEvent::Banned(peer_id, _addr_vec))) => { + self.swarm.ban_peer_id(*peer_id); + } + SwarmEvent::Behaviour(Ev(PeerManagerEvent::UnBanned(peer_id, _addr_vec))) => { + self.swarm.unban_peer_id(*peer_id); + } + SwarmEvent::Behaviour(Ev(PeerManagerEvent::DisconnectPeer(peer_id, _reason))) => { + // directly disconnect here. + let _ = self.swarm.disconnect_peer_id(*peer_id); + } + _ => {} + } + ev + } +} + +#[derive(Debug)] +struct Ev(PeerManagerEvent); +impl From for Ev { + fn from(_: void::Void) -> Self { + unreachable!("No events are emmited") + } +} +impl From for Ev { + fn from(ev: PeerManagerEvent) -> Self { + Ev(ev) + } +} + +#[derive(NetworkBehaviour)] +#[behaviour(out_event = "Ev")] +struct Behaviour { + pm_call_trace: CallTraceBehaviour>, + sibling: MockBehaviour, +} + +impl Behaviour { + fn new(pm: PeerManager) -> Self { + Behaviour { + pm_call_trace: CallTraceBehaviour::new(pm), + sibling: MockBehaviour::new(DummyProtocolsHandler { + // The peer manager votes No, so we make sure the combined handler stays alive this + // way. 
+ keep_alive: KeepAlive::Yes, + }), + } + } +} + +#[tokio::test] +async fn banned_peers_consistency() { + let log = common::build_log(slog::Level::Debug, false); + let pm_log = log.new(slog::o!("who" => "[PM]")); + let globals: Arc> = Arc::new(NetworkGlobals::new_test_globals(&log)); + + // Build the peer manager. + let (mut pm_service, pm_addr) = { + let pm_config = Config { + discovery_enabled: false, + ..Default::default() + }; + let pm = PeerManager::new(pm_config, globals.clone(), &pm_log) + .await + .unwrap(); + let mut pm_swarm = swarm::new_test_swarm(Behaviour::new(pm)); + let pm_addr = swarm::bind_listener(&mut pm_swarm).await; + let service = Service { swarm: pm_swarm }; + (service, pm_addr) + }; + + let excess_banned_peers = 15; + let peers_to_ban = + lighthouse_network::peer_manager::peerdb::MAX_BANNED_PEERS + excess_banned_peers; + + // Build all the dummy peers needed. + let (mut swarm_pool, peers) = { + let mut pool = swarm::SwarmPool::with_capacity(peers_to_ban); + let mut peers = HashSet::with_capacity(peers_to_ban); + for _ in 0..peers_to_ban { + let mut peer_swarm = + swarm::new_test_swarm(DummyBehaviour::with_keep_alive(KeepAlive::Yes)); + let _peer_addr = swarm::bind_listener(&mut peer_swarm).await; + // It is ok to dial all at the same time since the swarm handles an event at a time. + peer_swarm.dial(pm_addr.clone()).unwrap(); + let peer_id = pool.insert(peer_swarm); + peers.insert(peer_id); + } + (pool, peers) + }; + + // we track banned peers at the swarm level here since there is no access to that info. + let mut swarm_banned_peers = HashMap::with_capacity(peers_to_ban); + let mut peers_unbanned = 0; + let timeout = tokio::time::sleep(tokio::time::Duration::from_secs(30)); + futures::pin_mut!(timeout); + + loop { + // poll the pm and dummy swarms. + tokio::select! 
{ + pm_event = pm_service.select_next_some() => { + debug!(log, "[PM] {:?}", pm_event); + match pm_event { + SwarmEvent::Behaviour(Ev(ev)) => match ev { + PeerManagerEvent::Banned(peer_id, _) => { + let has_been_unbanned = false; + swarm_banned_peers.insert(peer_id, has_been_unbanned); + } + PeerManagerEvent::UnBanned(peer_id, _) => { + *swarm_banned_peers.get_mut(&peer_id).expect("Unbanned peer must be banned first") = true; + peers_unbanned += 1; + } + _ => {} + } + SwarmEvent::ConnectionEstablished { + peer_id, + endpoint: _, + num_established: _, + concurrent_dial_errors: _, + } => { + assert!(peers.contains(&peer_id)); + // now we report the peer as banned. + pm_service + .swarm + .behaviour_mut() + .pm_call_trace + .inner() + .report_peer( + &peer_id, + PeerAction::Fatal, + ReportSource::Processor, + None, + "" + ); + }, + _ => {} + } + } + Some((_peer_id, _peer_ev)) = swarm_pool.next() => { + // we need to poll the swarms to keep the peers going + } + _ = timeout.as_mut() => { + panic!("Test timeout.") + } + } + + if peers_unbanned == excess_banned_peers { + let pdb = globals.peers.read(); + let inconsistencies = swarm_banned_peers + .into_iter() + .map(|(peer_id, was_unbanned)| { + was_unbanned + != pdb.peer_info(&peer_id).map_or( + false, /* We forgot about a banned peer */ + PeerInfo::is_banned, + ) + }); + assert_eq!( + inconsistencies + .filter(|is_consistent| *is_consistent) + .count(), + peers_to_ban + ); + return; + } + } +} diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 565304a79b..b270765f8c 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,21 +1,52 @@ #![cfg(test)] use lighthouse_network::rpc::methods::*; -use lighthouse_network::{BehaviourEvent, Libp2pEvent, ReportSource, Request, Response}; +use lighthouse_network::{ + rpc::max_rpc_size, BehaviourEvent, Libp2pEvent, ReportSource, Request, Response, 
+}; use slog::{debug, warn, Level}; +use ssz::Encode; use ssz_types::VariableList; use std::sync::Arc; use std::time::Duration; use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, Epoch, EthSpec, Hash256, MinimalEthSpec, - Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, EthSpec, ForkContext, + Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, }; mod common; type E = MinimalEthSpec; +/// Merge block with length < max_rpc_size. +fn merge_block_small(fork_context: &ForkContext) -> BeaconBlock { + let mut block = BeaconBlockMerge::empty(&E::default_spec()); + let tx = VariableList::from(vec![0; 1024]); + let txs = VariableList::from(std::iter::repeat(tx).take(100).collect::>()); + + block.body.execution_payload.transactions = txs; + + let block = BeaconBlock::Merge(block); + assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context)); + block +} + +/// Merge block with length > MAX_RPC_SIZE. +/// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory. +/// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. +fn merge_block_large(fork_context: &ForkContext) -> BeaconBlock { + let mut block = BeaconBlockMerge::empty(&E::default_spec()); + let tx = VariableList::from(vec![0; 1024]); + let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); + + block.body.execution_payload.transactions = txs; + + let block = BeaconBlock::Merge(block); + assert!(block.ssz_bytes_len() > max_rpc_size(fork_context)); + block +} + // Tests the STATUS RPC message #[test] #[allow(clippy::single_match)] @@ -118,10 +149,10 @@ fn test_status_rpc() { #[allow(clippy::single_match)] fn test_blocks_by_range_chunked_rpc() { // set up the logging. 
The level and enabled logging or not - let log_level = Level::Trace; + let log_level = Level::Debug; let enable_logging = false; - let messages_to_send = 10; + let messages_to_send = 6; let log = common::build_log(log_level, enable_logging); @@ -149,8 +180,13 @@ fn test_blocks_by_range_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let full_block = merge_block_small(&common::fork_context()); + let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); + let rpc_response_merge_small = Response::BlocksByRange(Some(Box::new(signed_full_block))); + // keep count of the number of messages received let mut messages_received = 0; + let request_id = RequestId::Sync(messages_to_send as usize); // build the sender future let sender_future = async { loop { @@ -160,28 +196,30 @@ fn test_blocks_by_range_chunked_rpc() { debug!(log, "Sending RPC"); sender.swarm.behaviour_mut().send_request( peer_id, - RequestId::Sync(10), + request_id, rpc_request.clone(), ); } Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { peer_id: _, - id: RequestId::Sync(10), + id: _, response, }) => { warn!(log, "Sender received a response"); match response { Response::BlocksByRange(Some(_)) => { - if messages_received < 5 { + if messages_received < 2 { assert_eq!(response, rpc_response_base.clone()); - } else { + } else if messages_received < 4 { assert_eq!(response, rpc_response_altair.clone()); + } else { + assert_eq!(response, rpc_response_merge_small.clone()); } messages_received += 1; warn!(log, "Chunk received"); } Response::BlocksByRange(None) => { - // should be exactly 10 messages before terminating + // should be exactly `messages_to_send` messages before terminating assert_eq!(messages_received, messages_to_send); // end the test return; @@ -207,12 +245,14 @@ fn test_blocks_by_range_chunked_rpc() { // send the 
response warn!(log, "Receiver got request"); for i in 0..messages_to_send { - // Send first half of responses as base blocks and - // second half as altair blocks. - let rpc_response = if i < 5 { + // Send first third of responses as base blocks, + // second as altair and third as merge. + let rpc_response = if i < 2 { rpc_response_base.clone() - } else { + } else if i < 4 { rpc_response_altair.clone() + } else { + rpc_response_merge_small.clone() }; receiver.swarm.behaviour_mut().send_successful_response( peer_id, @@ -236,8 +276,105 @@ fn test_blocks_by_range_chunked_rpc() { tokio::select! { _ = sender_future => {} _ = receiver_future => {} - _ = sleep(Duration::from_secs(10)) => { - panic!("Future timed out"); + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); + } + } + }) +} + +// Tests rejection of blocks over `MAX_RPC_SIZE`. +#[test] +#[allow(clippy::single_match)] +fn test_blocks_by_range_over_limit() { + // set up the logging. The level and enabled logging or not + let log_level = Level::Debug; + let enable_logging = false; + + let messages_to_send = 5; + + let log = common::build_log(log_level, enable_logging); + + let rt = Arc::new(Runtime::new().unwrap()); + + rt.block_on(async { + // get sender/receiver + let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + + // BlocksByRange Request + let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { + start_slot: 0, + count: messages_to_send, + step: 0, + }); + + // BlocksByRange Response + let full_block = merge_block_large(&common::fork_context()); + let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); + let rpc_response_merge_large = Response::BlocksByRange(Some(Box::new(signed_full_block))); + + let request_id = RequestId::Sync(messages_to_send as usize); + // build the sender future + let sender_future = async { + loop { + match sender.next_event().await { + 
Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + // Send a STATUS message + debug!(log, "Sending RPC"); + sender.swarm.behaviour_mut().send_request( + peer_id, + request_id, + rpc_request.clone(), + ); + } + // The request will fail because the sender will refuse to send anything > MAX_RPC_SIZE + Libp2pEvent::Behaviour(BehaviourEvent::RPCFailed { id, .. }) => { + assert_eq!(id, request_id); + return; + } + _ => {} // Ignore other behaviour events + } + } + }; + + // build the receiver future + let receiver_future = async { + loop { + match receiver.next_event().await { + Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + peer_id, + id, + request, + }) => { + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); + for _ in 0..messages_to_send { + let rpc_response = rpc_response_merge_large.clone(); + receiver.swarm.behaviour_mut().send_successful_response( + peer_id, + id, + rpc_response.clone(), + ); + } + // send the stream termination + receiver.swarm.behaviour_mut().send_successful_response( + peer_id, + id, + Response::BlocksByRange(None), + ); + } + } + _ => {} // Ignore other events + } + } + }; + + tokio::select! 
{ + _ = sender_future => {} + _ = receiver_future => {} + _ = sleep(Duration::from_secs(30)) => { + panic!("Future timed out"); } } }) @@ -276,6 +413,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { // keep count of the number of messages received let mut messages_received: u64 = 0; + let request_id = RequestId::Sync(messages_to_send as usize); // build the sender future let sender_future = async { loop { @@ -285,13 +423,13 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { debug!(log, "Sending RPC"); sender.swarm.behaviour_mut().send_request( peer_id, - RequestId::Sync(10), + request_id, rpc_request.clone(), ); } Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { peer_id: _, - id: RequestId::Sync(10), + id: _, response, }) => // Should receive the RPC response @@ -497,7 +635,7 @@ fn test_blocks_by_root_chunked_rpc() { let log_level = Level::Debug; let enable_logging = false; - let messages_to_send = 10; + let messages_to_send = 6; let log = common::build_log(log_level, enable_logging); let spec = E::default_spec(); @@ -516,10 +654,6 @@ fn test_blocks_by_root_chunked_rpc() { Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), - Hash256::from_low_u64_be(0), ]), }); @@ -532,6 +666,10 @@ fn test_blocks_by_root_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let full_block = merge_block_small(&common::fork_context()); + let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); + let rpc_response_merge_small = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + // keep count of the number of messages received let mut messages_received = 0; // build the sender future @@ -543,20 +681,22 @@ fn test_blocks_by_root_chunked_rpc() { 
debug!(log, "Sending RPC"); sender.swarm.behaviour_mut().send_request( peer_id, - RequestId::Sync(10), + RequestId::Sync(6), rpc_request.clone(), ); } Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { peer_id: _, - id: RequestId::Sync(10), + id: RequestId::Sync(6), response, }) => match response { Response::BlocksByRoot(Some(_)) => { - if messages_received < 5 { + if messages_received < 2 { assert_eq!(response, rpc_response_base.clone()); - } else { + } else if messages_received < 4 { assert_eq!(response, rpc_response_altair.clone()); + } else { + assert_eq!(response, rpc_response_merge_small.clone()); } messages_received += 1; debug!(log, "Chunk received"); @@ -588,12 +728,13 @@ fn test_blocks_by_root_chunked_rpc() { debug!(log, "Receiver got request"); for i in 0..messages_to_send { - // Send first half of responses as base blocks and - // second half as altair blocks. - let rpc_response = if i < 5 { + // Send equal base, altair and merge blocks + let rpc_response = if i < 2 { rpc_response_base.clone() - } else { + } else if i < 4 { rpc_response_altair.clone() + } else { + rpc_response_merge_small.clone() }; receiver.swarm.behaviour_mut().send_successful_response( peer_id, @@ -619,7 +760,7 @@ fn test_blocks_by_root_chunked_rpc() { _ = sender_future => {} _ = receiver_future => {} _ = sleep(Duration::from_secs(30)) => { - panic!("Future timed out"); + panic!("Future timed out"); } } }) diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index f9d086701a..df68518881 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2018" [dev-dependencies] -sloggers = "2.0.2" +sloggers = { version = "2.1.1", features = ["json"] } genesis = { path = "../genesis" } matches = "0.1.8" exit-future = "0.2.0" @@ -23,8 +23,8 @@ types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } slog = { version = "2.5.2", features = 
["max_level_trace"] } hex = "0.4.2" -eth2_ssz = "0.4.0" -eth2_ssz_types = "0.2.1" +eth2_ssz = "0.4.1" +eth2_ssz_types = "0.2.2" futures = "0.3.7" error-chain = "0.12.4" tokio = { version = "1.14.0", features = ["full"] } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index c9b4bfa346..7c3d482fa5 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -63,7 +63,7 @@ use types::{ SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedBlock, QueuedUnaggregate, ReadyWork, + spawn_reprocess_scheduler, QueuedAggregate, QueuedUnaggregate, ReadyWork, }; use worker::{Toolbox, Worker}; @@ -72,6 +72,7 @@ mod tests; mod work_reprocessing_queue; mod worker; +use crate::beacon_processor::work_reprocessing_queue::QueuedBlock; pub use worker::{GossipAggregatePackage, GossipAttestationPackage, ProcessId}; /// The maximum size of the channel for work events to the `BeaconProcessor`. @@ -574,7 +575,7 @@ impl std::convert::From> for WorkEvent { drop_during_sync: false, work: Work::DelayedImportBlock { peer_id, - block: Box::new(block), + block, seen_timestamp, }, }, diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 00b5c009a3..299e71c8d5 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -91,7 +91,7 @@ pub struct QueuedAggregate { /// A block that arrived early and has been queued for later import. 
pub struct QueuedBlock { pub peer_id: PeerId, - pub block: GossipVerifiedBlock, + pub block: Box>, pub seen_timestamp: Duration, } diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 2e5ee5160b..9ece18d02c 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1,22 +1,25 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; +use beacon_chain::store::Error; use beacon_chain::{ - attestation_verification::{Error as AttnError, VerifiedAttestation}, + attestation_verification::{self, Error as AttnError, VerifiedAttestation}, observed_operations::ObservationOutcome, - sync_committee_verification::Error as SyncCommitteeError, + sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, + BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, ForkChoiceError, + GossipVerifiedBlock, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - SubnetId, SyncCommitteeMessage, SyncSubnetId, + Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use super::{ @@ -97,12 +100,7 @@ enum FailedAtt { impl FailedAtt { pub fn beacon_block_root(&self) -> &Hash256 { - match self { - FailedAtt::Unaggregate { attestation, .. 
} => &attestation.data.beacon_block_root, - FailedAtt::Aggregate { attestation, .. } => { - &attestation.message.aggregate.data.beacon_block_root - } - } + &self.attestation().data.beacon_block_root } pub fn kind(&self) -> &'static str { @@ -111,6 +109,13 @@ impl FailedAtt { FailedAtt::Aggregate { .. } => "aggregated", } } + + pub fn attestation(&self) -> &Attestation { + match self { + FailedAtt::Unaggregate { attestation, .. } => attestation, + FailedAtt::Aggregate { attestation, .. } => &attestation.message.aggregate, + } + } } /// Items required to verify a batch of unaggregated gossip attestations. @@ -120,7 +125,6 @@ pub struct GossipAttestationPackage { peer_id: PeerId, attestation: Box>, subnet_id: SubnetId, - beacon_block_root: Hash256, should_import: bool, seen_timestamp: Duration, } @@ -137,7 +141,6 @@ impl GossipAttestationPackage { Self { message_id, peer_id, - beacon_block_root: attestation.data.beacon_block_root, attestation, subnet_id, should_import, @@ -177,11 +180,12 @@ impl Worker { /* Auxiliary functions */ /// Penalizes a peer for misbehaviour. 
- fn gossip_penalize_peer(&self, peer_id: PeerId, action: PeerAction) { + fn gossip_penalize_peer(&self, peer_id: PeerId, action: PeerAction, msg: &'static str) { self.send_network_message(NetworkMessage::ReportPeer { peer_id, action, source: ReportSource::Gossipsub, + msg, }) } @@ -409,6 +413,7 @@ impl Worker { }, reprocess_tx, error, + seen_timestamp, ); } } @@ -607,6 +612,7 @@ impl Worker { }, reprocess_tx, error, + seen_timestamp, ); } } @@ -706,7 +712,7 @@ impl Worker { self.log, "New block received"; "slot" => verified_block.block.slot(), - "hash" => ?verified_block.block_root + "root" => ?verified_block.block_root ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept); @@ -733,16 +739,32 @@ impl Worker { self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); return None; } + Err(e @ BlockError::BeaconChainError(_)) => { + debug!( + self.log, + "Gossip block beacon chain error"; + "error" => ?e, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::RepeatProposal { .. }) - | Err(e @ BlockError::NotFinalizedDescendant { .. }) - | Err(e @ BlockError::BeaconChainError(_)) => { + | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); // Prevent recurring behaviour by penalizing the peer slightly. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError, "gossip_block_high"); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } + // TODO(merge): reconsider peer scoring for this event. 
+ Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) + | Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection)) => { + debug!(self.log, "Could not verify block for gossip, ignoring the block"; + "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } @@ -759,11 +781,15 @@ impl Worker { | Err(e @ BlockError::TooManySkippedSlots { .. }) | Err(e @ BlockError::WeakSubjectivityConflict) | Err(e @ BlockError::InconsistentFork(_)) + // TODO(merge): reconsider peer scoring for this event. + | Err(e @ BlockError::ExecutionPayloadError(_)) + // TODO(merge): reconsider peer scoring for this event. + | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) | Err(e @ BlockError::GenesisBlock) => { warn!(self.log, "Could not verify block for gossip, rejecting the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError, "gossip_block_low"); return None; } }; @@ -817,7 +843,7 @@ impl Worker { if reprocess_tx .try_send(ReprocessQueueMessage::EarlyBlock(QueuedBlock { peer_id, - block: verified_block, + block: Box::new(verified_block), seen_timestamp: seen_duration, })) .is_err() @@ -914,7 +940,11 @@ impl Worker { "block root" => ?block.canonical_root(), "block slot" => block.slot() ); - self.gossip_penalize_peer(peer_id, PeerAction::MidToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "bad_gossip_block_ssz", + ); trace!( self.log, "Invalid gossip beacon block ssz"; @@ -956,7 +986,11 @@ impl Worker { // the fault on the peer. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // We still penalize a peer slightly to prevent overuse of invalids. 
- self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_gossip_exit", + ); return; } }; @@ -1015,7 +1049,11 @@ impl Worker { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize peer slightly for invalids. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_gossip_proposer_slashing", + ); return; } }; @@ -1066,7 +1104,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize peer slightly for invalids. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "invalid_gossip_attester_slashing", + ); return; } }; @@ -1104,6 +1146,7 @@ impl Worker { subnet_id: SyncSubnetId, seen_timestamp: Duration, ) { + let message_slot = sync_signature.slot; let sync_signature = match self .chain .verify_sync_committee_message_for_gossip(sync_signature, subnet_id) @@ -1115,6 +1158,8 @@ impl Worker { message_id, "sync_signature", e, + message_slot, + seen_timestamp, ); return; } @@ -1164,6 +1209,7 @@ impl Worker { sync_contribution: SignedContributionAndProof, seen_timestamp: Duration, ) { + let contribution_slot = sync_contribution.message.contribution.slot; let sync_contribution = match self .chain .verify_sync_contribution_for_gossip(sync_contribution) @@ -1176,6 +1222,8 @@ impl Worker { message_id, "sync_contribution", e, + contribution_slot, + seen_timestamp, ); return; } @@ -1219,15 +1267,13 @@ impl Worker { failed_att: FailedAtt, reprocess_tx: Option>>, error: AttnError, + seen_timestamp: Duration, ) { let beacon_block_root = failed_att.beacon_block_root(); let attestation_type = failed_att.kind(); metrics::register_attestation_error(&error); match &error { - AttnError::FutureEpoch { .. 
} - | AttnError::PastEpoch { .. } - | AttnError::FutureSlot { .. } - | AttnError::PastSlot { .. } => { + AttnError::FutureSlot { .. } => { /* * These errors can be triggered by a mismatch between our slot and the peer. * @@ -1244,11 +1290,37 @@ impl Worker { // Peers that are slow or not to spec can spam us with these messages draining our // bandwidth. We therefore penalize these peers when they do this. - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_future_slot", + ); // Do not propagate these messages. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } + AttnError::PastSlot { .. } => { + // Produce a slot clock frozen at the time we received the message from the + // network. + let seen_clock = &self.chain.slot_clock.freeze_at(seen_timestamp); + let hindsight_verification = + attestation_verification::verify_propagation_slot_range( + seen_clock, + failed_att.attestation(), + ); + + // Only penalize the peer if it would have been invalid at the moment we received + // it. + if hindsight_verification.is_err() { + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_past_slot", + ); + } + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + } AttnError::InvalidSelectionProof { .. } | AttnError::InvalidSignature => { /* * These errors are caused by invalid signatures. @@ -1256,7 +1328,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_selection_proof", + ); } AttnError::EmptyAggregationBitfield => { /* @@ -1266,7 +1342,11 @@ impl Worker { * */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_empty_agg_bitfield", + ); } AttnError::AggregatorPubkeyUnknown(_) => { /* @@ -1283,7 +1363,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_agg_pubkey", + ); } AttnError::AggregatorNotInCommittee { .. } => { /* @@ -1300,7 +1384,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_agg_not_in_committee", + ); } AttnError::AttestationAlreadyKnown { .. } => { /* @@ -1376,7 +1464,11 @@ impl Worker { "type" => ?attestation_type, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_val_index_too_high", + ); } AttnError::UnknownHeadBlock { beacon_block_root } => { trace!( @@ -1440,8 +1532,9 @@ impl Worker { } } else { // We shouldn't make any further attempts to process this attestation. - // Downscore the peer. 
- self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + // + // Don't downscore the peer since it's not clear if we requested this head + // block from them or not. self.propagate_validation_result( message_id, peer_id, @@ -1469,7 +1562,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_unknown_target", + ); } AttnError::BadTargetEpoch => { /* @@ -1479,7 +1576,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_bad_target", + ); } AttnError::NoCommitteeForSlotAndIndex { .. } => { /* @@ -1488,7 +1589,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_no_committee", + ); } AttnError::NotExactlyOneAggregationBitSet(_) => { /* @@ -1497,7 +1602,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_too_many_agg_bits", + ); } AttnError::AttestsToFutureBlock { .. } => { /* @@ -1506,7 +1615,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_future_block", + ); } AttnError::InvalidSubnetId { received, expected } => { /* @@ -1519,7 +1632,11 @@ impl Worker { "received" => ?received, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_subnet_id", + ); } AttnError::Invalid(_) => { /* @@ -1528,7 +1645,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_state_processing", + ); } AttnError::InvalidTargetEpoch { .. } => { /* @@ -1537,7 +1658,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_target_epoch", + ); } AttnError::InvalidTargetRoot { .. } => { /* @@ -1546,7 +1671,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_invalid_target_root", + ); } AttnError::TooManySkippedSlots { head_block_slot, @@ -1566,7 +1695,17 @@ impl Worker { // In this case we wish to penalize gossipsub peers that do this to avoid future // attestations that have too many skip slots. self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::MidToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "attn_too_many_skipped_slots", + ); + } + AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError( + HotColdDBError::AttestationStateIsFinalized { .. }, + ))) => { + debug!(self.log, "Attestation for finalized state"; "peer_id" => % peer_id); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } AttnError::BeaconChainError(e) => { /* @@ -1583,8 +1722,6 @@ impl Worker { "error" => ?e, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); } } @@ -1606,6 +1743,8 @@ impl Worker { message_id: MessageId, message_type: &str, error: SyncCommitteeError, + sync_committee_message_slot: Slot, + seen_timestamp: Duration, ) { metrics::register_sync_committee_error(&error); @@ -1626,15 +1765,16 @@ impl Worker { // Unlike attestations, we have a zero slot buffer in case of sync committee messages, // so we don't penalize heavily. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_future_slot", + ); // Do not propagate these messages. 
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } - SyncCommitteeError::PastSlot { - message_slot, - earliest_permissible_slot, - } => { + SyncCommitteeError::PastSlot { .. } => { /* * This error can be triggered by a mismatch between our slot and the peer. * @@ -1648,12 +1788,38 @@ impl Worker { "type" => ?message_type, ); - // We tolerate messages that were just one slot late. - if *message_slot + 1 < *earliest_permissible_slot { - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + // Compute the slot when we received the message. + let received_slot = self + .chain + .slot_clock + .slot_of(seen_timestamp) + .unwrap_or_else(|| self.chain.slot_clock.genesis_slot()); + + // The message is "excessively" late if it was more than one slot late. + let excessively_late = received_slot > sync_committee_message_slot + 1; + + // This closure will lazily produce a slot clock frozen at the time we received the + // message from the network and return a bool indicating if the message was invalid + // at the time of receipt too. + let invalid_in_hindsight = || { + let seen_clock = &self.chain.slot_clock.freeze_at(seen_timestamp); + let hindsight_verification = + sync_committee_verification::verify_propagation_slot_range( + seen_clock, + &sync_committee_message_slot, + ); + hindsight_verification.is_err() + }; + + // Penalize the peer if the message was more than one slot late + if excessively_late && invalid_in_hindsight() { + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_past_slot", + ); } - // Do not propagate these messages. 
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); } SyncCommitteeError::EmptyAggregationBitfield => { @@ -1664,7 +1830,11 @@ impl Worker { * */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_empty_agg_bitfield", + ); } SyncCommitteeError::InvalidSelectionProof { .. } | SyncCommitteeError::InvalidSignature => { @@ -1674,7 +1844,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_proof_or_sig", + ); } SyncCommitteeError::AggregatorNotInCommittee { .. } | SyncCommitteeError::AggregatorPubkeyUnknown(_) => { @@ -1685,7 +1859,11 @@ impl Worker { * The peer has published an invalid consensus message. 
*/ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_bad_aggregator", + ); } SyncCommitteeError::SyncContributionAlreadyKnown(_) | SyncCommitteeError::AggregatorAlreadyKnown(_) => { @@ -1718,7 +1896,11 @@ impl Worker { "type" => ?message_type, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_unknown_validator", + ); } SyncCommitteeError::UnknownValidatorPubkey(_) => { debug!( @@ -1728,7 +1910,11 @@ impl Worker { "type" => ?message_type, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_unknown_validator_pubkey", + ); } SyncCommitteeError::InvalidSubnetId { received, expected } => { /* @@ -1741,7 +1927,11 @@ impl Worker { "received" => ?received, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_subnet_id", + ); } SyncCommitteeError::Invalid(_) => { /* @@ -1750,7 +1940,11 @@ impl Worker { * The peer has published an invalid consensus message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_state_processing", + ); } SyncCommitteeError::PriorSyncCommitteeMessageKnown { .. 
} => { /* @@ -1766,7 +1960,11 @@ impl Worker { ); // We still penalize the peer slightly. We don't want this to be a recurring // behaviour. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_prior_known", + ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -1787,8 +1985,6 @@ impl Worker { "error" => ?e, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); } SyncCommitteeError::BeaconStateError(e) => { /* @@ -1806,7 +2002,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_beacon_state_error", + ); } SyncCommitteeError::ContributionError(e) => { error!( @@ -1817,7 +2017,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_contribution_error", + ); } SyncCommitteeError::SyncCommitteeError(e) => { error!( @@ -1828,7 +2032,11 @@ impl Worker { ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); // Penalize the peer slightly - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "sync_committee_error", + ); } SyncCommitteeError::ArithError(e) => { /* @@ -1841,7 +2049,11 @@ impl Worker { "error" => ?e, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); - self.gossip_penalize_peer(peer_id, 
PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_arith_error", + ); } SyncCommitteeError::InvalidSubcommittee { .. } => { /* @@ -1849,7 +2061,11 @@ impl Worker { an invalid message. */ self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "sync_invalid_subcommittee", + ); } } debug!( diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index f3d49c2b42..f79a655745 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -129,7 +129,7 @@ impl Worker { ) { let mut send_block_count = 0; for root in request.block_roots.iter() { - if let Ok(Some(block)) = self.chain.store.get_block(root) { + if let Ok(Some(block)) = self.chain.get_block_checking_early_attester_cache(root) { self.send_response( peer_id, Response::BlocksByRoot(Some(Box::new(block))), diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 6a75c2990a..27e0a6711d 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -7,7 +7,7 @@ use crate::sync::{BatchProcessResult, ChainId}; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; -use lighthouse_network::PeerId; +use lighthouse_network::{PeerAction, PeerId}; use slog::{crit, debug, error, info, trace, warn}; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -23,6 +23,14 @@ pub enum ProcessId { ParentLookup(PeerId, Hash256), } +/// Returned when a chain segment import fails. 
+struct ChainSegmentFailed { + /// To be displayed in logs. + message: String, + /// Used to penalize peers. + peer_action: Option, +} + impl Worker { /// Attempt to process a block received from a direct RPC request, returning the processing /// result on the `result_tx` channel. @@ -123,9 +131,13 @@ impl Worker { "chain" => chain_id, "last_block_slot" => end_slot, "imported_blocks" => imported_blocks, - "error" => e, + "error" => %e.message, "service" => "sync"); - BatchProcessResult::Failed(imported_blocks > 0) + + BatchProcessResult::Failed { + imported_blocks: imported_blocks > 0, + peer_action: e.peer_action, + } } }; @@ -154,9 +166,12 @@ impl Worker { "batch_epoch" => epoch, "first_block_slot" => start_slot, "last_block_slot" => end_slot, - "error" => e, + "error" => %e.message, "service" => "sync"); - BatchProcessResult::Failed(false) + BatchProcessResult::Failed { + imported_blocks: false, + peer_action: e.peer_action, + } } }; @@ -175,7 +190,7 @@ impl Worker { // reverse match self.process_blocks(downloaded_blocks.iter().rev()) { (_, Err(e)) => { - debug!(self.log, "Parent lookup failed"; "last_peer_id" => %peer_id, "error" => e); + debug!(self.log, "Parent lookup failed"; "last_peer_id" => %peer_id, "error" => %e.message); self.send_sync_message(SyncMessage::ParentLookupFailed { peer_id, chain_head, @@ -193,7 +208,7 @@ impl Worker { fn process_blocks<'a>( &self, downloaded_blocks: impl Iterator>, - ) -> (usize, Result<(), String>) { + ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks = downloaded_blocks.cloned().collect::>(); match self.chain.process_chain_segment(blocks) { ChainSegmentResult::Successful { imported_blocks } => { @@ -223,7 +238,7 @@ impl Worker { fn process_backfill_blocks( &self, blocks: &[SignedBeaconBlock], - ) -> (usize, Result<(), String>) { + ) -> (usize, Result<(), ChainSegmentFailed>) { match self.chain.import_historical_block_batch(blocks) { Ok(imported_blocks) => { metrics::inc_counter( @@ -250,7 +265,12 @@ impl Worker { 
"block_root" => ?block_root, "expected_root" => ?expected_block_root ); - String::from("mismatched_block_root") + + ChainSegmentFailed { + message: String::from("mismatched_block_root"), + // The peer is faulty if they send blocks with bad roots. + peer_action: Some(PeerAction::LowToleranceError), + } } HistoricalBlockError::InvalidSignature | HistoricalBlockError::SignatureSet(_) => { @@ -259,7 +279,12 @@ impl Worker { "Backfill batch processing error"; "error" => ?e ); - "invalid_signature".into() + + ChainSegmentFailed { + message: "invalid_signature".into(), + // The peer is faulty if they bad signatures. + peer_action: Some(PeerAction::LowToleranceError), + } } HistoricalBlockError::ValidatorPubkeyCacheTimeout => { warn!( @@ -267,25 +292,55 @@ impl Worker { "Backfill batch processing error"; "error" => "pubkey_cache_timeout" ); - "pubkey_cache_timeout".into() + + ChainSegmentFailed { + message: "pubkey_cache_timeout".into(), + // This is an internal error, do not penalize the peer. + peer_action: None, + } } HistoricalBlockError::NoAnchorInfo => { warn!(self.log, "Backfill not required"); - String::from("no_anchor_info") + + ChainSegmentFailed { + message: String::from("no_anchor_info"), + // There is no need to do a historical sync, this is not a fault of + // the peer. + peer_action: None, + } } - HistoricalBlockError::IndexOutOfBounds - | HistoricalBlockError::BlockOutOfRange { .. } => { + HistoricalBlockError::IndexOutOfBounds => { error!( self.log, - "Backfill batch processing error"; + "Backfill batch OOB error"; "error" => ?e, ); - String::from("logic_error") + ChainSegmentFailed { + message: String::from("logic_error"), + // This should never occur, don't penalize the peer. + peer_action: None, + } + } + HistoricalBlockError::BlockOutOfRange { .. } => { + error!( + self.log, + "Backfill batch error"; + "error" => ?e, + ); + ChainSegmentFailed { + message: String::from("unexpected_error"), + // This should never occur, don't penalize the peer. 
+ peer_action: None, + } } }, other => { warn!(self.log, "Backfill batch processing error"; "error" => ?other); - format!("{:?}", other) + ChainSegmentFailed { + message: format!("{:?}", other), + // This is an internal error, don't penalize the peer. + peer_action: None, + } } }; (0, Err(err)) @@ -312,15 +367,18 @@ impl Worker { } /// Helper function to handle a `BlockError` from `process_chain_segment` - fn handle_failed_chain_segment(&self, error: BlockError) -> Result<(), String> { + fn handle_failed_chain_segment( + &self, + error: BlockError, + ) -> Result<(), ChainSegmentFailed> { match error { BlockError::ParentUnknown(block) => { // blocks should be sequential and all parents should exist - - Err(format!( - "Block has an unknown parent: {}", - block.parent_root() - )) + Err(ChainSegmentFailed { + message: format!("Block has an unknown parent: {}", block.parent_root()), + // Peers are faulty if they send non-sequential blocks. + peer_action: Some(PeerAction::LowToleranceError), + }) } BlockError::BlockIsAlreadyKnown => { // This can happen for many reasons. Head sync's can download multiples and parent @@ -350,10 +408,14 @@ impl Worker { ); } - Err(format!( - "Block with slot {} is higher than the current slot {}", - block_slot, present_slot - )) + Err(ChainSegmentFailed { + message: format!( + "Block with slot {} is higher than the current slot {}", + block_slot, present_slot + ), + // Peers are faulty if they send blocks from the future. + peer_action: Some(PeerAction::LowToleranceError), + }) } BlockError::WouldRevertFinalizedSlot { .. } => { debug!(self.log, "Finalized or earlier block processed";); @@ -370,7 +432,11 @@ impl Worker { "outcome" => ?e, ); - Err(format!("Internal error whilst processing block: {:?}", e)) + Err(ChainSegmentFailed { + message: format!("Internal error whilst processing block: {:?}", e), + // Do not penalize peers for internal errors. 
+ peer_action: None, + }) } other => { debug!( @@ -379,7 +445,11 @@ impl Worker { "outcome" => %other, ); - Err(format!("Peer sent invalid block. Reason: {:?}", other)) + Err(ChainSegmentFailed { + message: format!("Peer sent invalid block. Reason: {:?}", other), + // Do not penalize peers for internal errors. + peer_action: None, + }) } } } diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 0dfc657165..a10d238764 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -4,216 +4,42 @@ use beacon_chain::{ }; use fnv::FnvHashMap; pub use lighthouse_metrics::*; -use lighthouse_network::PubsubMessage; use lighthouse_network::{ - types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, TopicHash, + types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, }; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use strum::AsStaticRef; -use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, subnet_id::subnet_id_to_string, - sync_subnet_id::sync_subnet_id_to_string, EthSpec, -}; +use types::EthSpec; lazy_static! 
{ - /* - * Gossip subnets and scoring - */ - pub static ref PEERS_PER_PROTOCOL: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_protocol", - "Peers via supported protocol", - &["protocol"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_subscribed_attestation_subnets", - "Attestation subnets currently subscribed to", - &["subnet"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_subscribed_sync_subnets", - "Sync subnets currently subscribed to", - &["subnet"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_attestation_subnet_topic_count", - "Peers subscribed per attestation subnet topic", - &["subnet"] - ); - - pub static ref GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_peers_per_sync_subnet_topic_count", - "Peers subscribed per sync subnet topic", - &["subnet"] - ); - - pub static ref MESH_PEERS_PER_MAIN_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_mesh_peers_per_main_topic", - "Mesh peers per main topic", - &["topic_hash"] - ); - - pub static ref MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_mesh_peers_per_subnet_topic", - "Mesh peers per subnet topic", - &["subnet"] - ); - - pub static ref MESH_PEERS_PER_SYNC_SUBNET_TOPIC: Result = try_create_int_gauge_vec( - "gossipsub_mesh_peers_per_subnet_topic", - "Mesh peers per subnet topic", - &["subnet"] - ); - - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC: Result = try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_topic", - "Average peer's score per topic", - &["topic_hash"] - ); - - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC: Result = try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_attestation_subnet_topic", - "Average peer's score 
per attestation subnet topic", - &["subnet"] - ); - - pub static ref AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC: Result = try_create_float_gauge_vec( - "gossipsub_avg_peer_score_per_sync_subnet_topic", - "Average peer's score per sync committee subnet topic", - &["subnet"] - ); - - pub static ref ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT: Result = try_create_int_counter_vec( - "gossipsub_attestations_published_per_subnet_per_slot", - "Failed attestation publishes per subnet", - &["subnet"] - ); - - pub static ref SCORES_BELOW_ZERO_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_zero_per_client", - "Relative number of scores below zero per client", - &["Client"] - ); - pub static ref SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_gossip_threshold_per_client", - "Relative number of scores below gossip threshold per client", - &["Client"] - ); - pub static ref SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_publish_threshold_per_client", - "Relative number of scores below publish threshold per client", - &["Client"] - ); - pub static ref SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_scores_below_greylist_threshold_per_client", - "Relative number of scores below greylist threshold per client", - &["Client"] - ); - - pub static ref MIN_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_min_scores_per_client", - "Minimum scores per client", - &["Client"] - ); - pub static ref MEDIAN_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_median_scores_per_client", - "Median scores per client", - &["Client"] - ); - pub static ref MEAN_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( - "gossipsub_mean_scores_per_client", - "Mean scores per client", - &["Client"] - ); - pub static ref MAX_SCORES_PER_CLIENT: Result = try_create_float_gauge_vec( 
- "gossipsub_max_scores_per_client", - "Max scores per client", - &["Client"] - ); pub static ref BEACON_BLOCK_MESH_PEERS_PER_CLIENT: Result = - try_create_int_gauge_vec( - "block_mesh_peers_per_client", - "Number of mesh peers for BeaconBlock topic per client", - &["Client"] - ); + try_create_int_gauge_vec( + "block_mesh_peers_per_client", + "Number of mesh peers for BeaconBlock topic per client", + &["Client"] + ); + pub static ref BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT: Result = try_create_int_gauge_vec( "beacon_aggregate_and_proof_mesh_peers_per_client", "Number of mesh peers for BeaconAggregateAndProof topic per client", &["Client"] ); -} - -lazy_static! { - /* - * Gossip Rx - */ - pub static ref GOSSIP_BLOCKS_RX: Result = try_create_int_counter( - "gossipsub_blocks_rx_total", - "Count of gossip blocks received" - ); - pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( - "gossipsub_unaggregated_attestations_rx_total", - "Count of gossip unaggregated attestations received" - ); - pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_RX: Result = try_create_int_counter( - "gossipsub_aggregated_attestations_rx_total", - "Count of gossip aggregated attestations received" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_RX: Result = try_create_int_counter( - "gossipsub_sync_committee_message_rx_total", - "Count of gossip sync committee messages received" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX: Result = try_create_int_counter( - "gossipsub_sync_committee_contribution_received_total", - "Count of gossip sync committee contributions received" - ); - - - /* - * Gossip Tx - */ - pub static ref GOSSIP_BLOCKS_TX: Result = try_create_int_counter( - "gossipsub_blocks_tx_total", - "Count of gossip blocks transmitted" - ); - pub static ref GOSSIP_UNAGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( - "gossipsub_unaggregated_attestations_tx_total", - "Count of gossip unaggregated attestations transmitted" - 
); - pub static ref GOSSIP_AGGREGATED_ATTESTATIONS_TX: Result = try_create_int_counter( - "gossipsub_aggregated_attestations_tx_total", - "Count of gossip aggregated attestations transmitted" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_MESSAGE_TX: Result = try_create_int_counter( - "gossipsub_sync_committee_message_tx_total", - "Count of gossip sync committee messages transmitted" - ); - pub static ref GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX: Result = try_create_int_counter( - "gossipsub_sync_committee_contribution_tx_total", - "Count of gossip sync committee contributions transmitted" - ); /* * Attestation subnet subscriptions */ pub static ref SUBNET_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "gossipsub_attestation_subnet_subscriptions_total", + "validator_attestation_subnet_subscriptions_total", "Count of validator attestation subscription requests." ); pub static ref SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS: Result = try_create_int_counter( - "gossipsub_subnet_subscriptions_aggregator_total", + "validator_subnet_subscriptions_aggregator_total", "Count of validator subscription requests where the subscriber is an aggregator." ); - - /* - * Sync committee subnet subscriptions - */ - pub static ref SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( - "gossipsub_sync_committee_subnet_subscriptions_total", + pub static ref SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: Result = try_create_int_counter( + "validator_sync_committee_subnet_subscriptions_total", "Count of validator sync committee subscription requests." ); @@ -406,14 +232,13 @@ lazy_static! { "beacon_processor_sync_contribution_verified_total", "Total number of sync committee contributions verified for gossip." ); + pub static ref BEACON_PROCESSOR_SYNC_CONTRIBUTION_IMPORTED_TOTAL: Result = try_create_int_counter( "beacon_processor_sync_contribution_imported_total", "Total number of sync committee contributions imported to fork choice, etc." ); -} - -lazy_static! 
{ + /// Errors and Debugging Stats pub static ref GOSSIP_ATTESTATION_ERRORS_PER_TYPE: Result = try_create_int_counter_vec( "gossipsub_attestation_errors_per_type", @@ -426,8 +251,16 @@ lazy_static! { "Gossipsub sync_committee errors per error type", &["type"] ); +} + +lazy_static! { + + /* + * Bandwidth metrics + */ pub static ref INBOUND_LIBP2P_BYTES: Result = try_create_int_gauge("libp2p_inbound_bytes", "The inbound bandwidth over libp2p"); + pub static ref OUTBOUND_LIBP2P_BYTES: Result = try_create_int_gauge( "libp2p_outbound_bytes", "The outbound bandwidth over libp2p" @@ -436,18 +269,8 @@ lazy_static! { "libp2p_total_bandwidth", "The total inbound/outbound bandwidth over libp2p" ); -} -pub fn update_bandwidth_metrics(bandwidth: Arc) { - set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64); - set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64); - set_gauge( - &TOTAL_LIBP2P_BANDWIDTH, - (bandwidth.total_inbound() + bandwidth.total_outbound()) as i64, - ); -} -lazy_static! { /* * Sync related metrics */ @@ -489,11 +312,21 @@ lazy_static! { ); pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_EXPIRED_ATTESTATIONS: Result = try_create_int_counter( "beacon_processor_reprocessing_queue_expired_attestations", - "Number of queued attestations which have expired before a matching block has been found" + "Number of queued attestations which have expired before a matching block has been found." ); pub static ref BEACON_PROCESSOR_REPROCESSING_QUEUE_MATCHED_ATTESTATIONS: Result = try_create_int_counter( "beacon_processor_reprocessing_queue_matched_attestations", - "Number of queued attestations where as matching block has been imported" + "Number of queued attestations where as matching block has been imported." 
+ ); + +} + +pub fn update_bandwidth_metrics(bandwidth: Arc) { + set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64); + set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64); + set_gauge( + &TOTAL_LIBP2P_BANDWIDTH, + (bandwidth.total_inbound() + bandwidth.total_outbound()) as i64, ); } @@ -505,402 +338,51 @@ pub fn register_sync_committee_error(error: &SyncCommitteeError) { inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); } -/// Inspects the `messages` that were being sent to the network and updates Prometheus metrics. -pub fn expose_publish_metrics(messages: &[PubsubMessage]) { - for message in messages { - match message { - PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_TX), - PubsubMessage::Attestation(subnet_id) => { - inc_counter_vec( - &ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT, - &[subnet_id.0.as_ref()], - ); - inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_TX) - } - PubsubMessage::AggregateAndProofAttestation(_) => { - inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_TX) - } - PubsubMessage::SyncCommitteeMessage(_) => { - inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_TX) - } - PubsubMessage::SignedContributionAndProof(_) => { - inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_TX) - } - _ => {} - } - } -} - -/// Inspects a `message` received from the network and updates Prometheus metrics. 
-pub fn expose_receive_metrics(message: &PubsubMessage) { - match message { - PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_RX), - PubsubMessage::Attestation(_) => inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_RX), - PubsubMessage::AggregateAndProofAttestation(_) => { - inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_RX) - } - PubsubMessage::SyncCommitteeMessage(_) => inc_counter(&GOSSIP_SYNC_COMMITTEE_MESSAGE_RX), - PubsubMessage::SignedContributionAndProof(_) => { - inc_counter(&GOSSIP_SYNC_COMMITTEE_CONTRIBUTION_RX) - } - _ => {} - } -} - pub fn update_gossip_metrics( gossipsub: &Gossipsub, network_globals: &Arc>, ) { - // Clear the metrics - let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset()); - let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset()); - let _ = MESH_PEERS_PER_MAIN_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC - .as_ref() - .map(|gauge| gauge.reset()); - - let _ = SCORES_BELOW_ZERO_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = MIN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - let _ = MEDIAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - let _ = MEAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - let _ = MAX_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset()); - - let _ = BEACON_BLOCK_MESH_PEERS_PER_CLIENT - .as_ref() - .map(|gauge| gauge.reset()); - let _ = BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT - .as_ref() - .map(|gauge| 
gauge.reset()); - - // reset the mesh peers, showing all subnets - for subnet_id in 0..T::default_spec().attestation_subnet_count { - let _ = get_int_gauge( - &MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - } - - for subnet_id in 0..SYNC_COMMITTEE_SUBNET_COUNT { - let _ = get_int_gauge( - &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id)], - ) - .map(|v| v.set(0)); - } - - // Subnet topics subscribed to - for topic_hash in gossipsub.topics() { - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - if let GossipKind::Attestation(subnet_id) = topic.kind() { - let _ = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) - .map(|v| v.set(1)); - } - } - } - - // Peers per subscribed subnet - let mut peers_per_topic: HashMap = HashMap::new(); - for (peer_id, topics) in gossipsub.all_peers() { - for topic_hash in topics { - *peers_per_topic.entry(topic_hash.clone()).or_default() += 1; - - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - if let Some(v) = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.inc() - }; - - // average peer scores - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) 
= get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.add(score) - }; - } - } - GossipKind::SyncCommitteeMessage(subnet_id) => { - if let Some(v) = get_int_gauge( - &GOSSIPSUB_SUBSCRIBED_PEERS_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.inc() - }; - - // average peer scores - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.add(score) - }; - } - } - kind => { - // main topics - if let Some(score) = gossipsub.peer_score(peer_id) { - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, - &[kind.as_ref()], - ) { - v.add(score) - }; - } - } - } - } - } - } - // adjust to average scores by dividing by number of peers - for (topic_hash, peers) in peers_per_topic.iter() { - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - // average peer scores - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.set(v.get() / (*peers as f64)) - }; - } - GossipKind::SyncCommitteeMessage(subnet_id) => { - // average peer scores - if let Some(v) = get_gauge( - &AVG_GOSSIPSUB_PEER_SCORE_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.set(v.get() / (*peers as f64)) - }; - } - kind => { - // main topics - if let Some(v) = - get_gauge(&AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, &[kind.as_ref()]) - { - v.set(v.get() / (*peers as f64)) - }; - } - } - } - } - - // mesh peers - for topic_hash in gossipsub.topics() { - let peers = gossipsub.mesh_peers(topic_hash).count(); - if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { - match topic.kind() { - GossipKind::Attestation(subnet_id) => { - if let Some(v) = get_int_gauge( - 
&MESH_PEERS_PER_ATTESTATION_SUBNET_TOPIC, - &[subnet_id_to_string(subnet_id.into())], - ) { - v.set(peers as i64) - }; - } - GossipKind::SyncCommitteeMessage(subnet_id) => { - if let Some(v) = get_int_gauge( - &MESH_PEERS_PER_SYNC_SUBNET_TOPIC, - &[sync_subnet_id_to_string(subnet_id.into())], - ) { - v.set(peers as i64) - }; - } - kind => { - // main topics - if let Some(v) = get_int_gauge(&MESH_PEERS_PER_MAIN_TOPIC, &[kind.as_ref()]) { - v.set(peers as i64) - }; - } - } - } - } - - // protocol peers - let mut peers_per_protocol: HashMap<&'static str, i64> = HashMap::new(); - for (_peer, protocol) in gossipsub.peer_protocol() { - *peers_per_protocol - .entry(protocol.as_static_ref()) - .or_default() += 1; - } - - for (protocol, peers) in peers_per_protocol.iter() { - if let Some(v) = get_int_gauge(&PEERS_PER_PROTOCOL, &[protocol]) { - v.set(*peers) - }; - } - - let mut peer_to_client = HashMap::new(); - let mut scores_per_client: HashMap<&'static str, Vec> = HashMap::new(); - { - let peers = network_globals.peers(); - for (peer_id, _) in gossipsub.all_peers() { - let client = peers - .peer_info(peer_id) - .map(|peer_info| peer_info.client().kind.as_static()) - .unwrap_or_else(|| "Unknown"); - - peer_to_client.insert(peer_id, client); - let score = gossipsub.peer_score(peer_id).unwrap_or(0.0); - scores_per_client.entry(client).or_default().push(score); - } - } - - // mesh peers per client + // Mesh peers per client for topic_hash in gossipsub.topics() { if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) { match topic.kind() { + GossipKind::Attestation(_subnet_id) => {} GossipKind::BeaconBlock => { - for peer in gossipsub.mesh_peers(topic_hash) { - if let Some(client) = peer_to_client.get(peer) { - if let Some(v) = - get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client]) - { - v.inc() - }; - } + for peer_id in gossipsub.mesh_peers(topic_hash) { + let client = network_globals + .peers + .read() + .peer_info(peer_id) + .map(|peer_info| 
peer_info.client().kind.as_static()) + .unwrap_or_else(|| "Unknown"); + if let Some(v) = + get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client]) + { + v.inc() + }; } } GossipKind::BeaconAggregateAndProof => { - for peer in gossipsub.mesh_peers(topic_hash) { - if let Some(client) = peer_to_client.get(peer) { - if let Some(v) = get_int_gauge( - &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, - &[client], - ) { - v.inc() - }; - } + for peer_id in gossipsub.mesh_peers(topic_hash) { + let client = network_globals + .peers + .read() + .peer_info(peer_id) + .map(|peer_info| peer_info.client().kind.as_static()) + .unwrap_or_else(|| "Unknown"); + if let Some(v) = get_int_gauge( + &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, + &[client], + ) { + v.inc() + }; } } - _ => (), + GossipKind::SyncCommitteeMessage(_subnet_id) => {} + _kind => {} } } } - - for (client, scores) in scores_per_client.into_iter() { - let c = &[client]; - let len = scores.len(); - if len > 0 { - let mut below0 = 0; - let mut below_gossip_threshold = 0; - let mut below_publish_threshold = 0; - let mut below_greylist_threshold = 0; - let mut min = f64::INFINITY; - let mut sum = 0.0; - let mut max = f64::NEG_INFINITY; - - let count = scores.len() as f64; - - for &score in &scores { - if score < 0.0 { - below0 += 1; - } - if score < -4000.0 { - //TODO not hardcode - below_gossip_threshold += 1; - } - if score < -8000.0 { - //TODO not hardcode - below_publish_threshold += 1; - } - if score < -16000.0 { - //TODO not hardcode - below_greylist_threshold += 1; - } - if score < min { - min = score; - } - if score > max { - max = score; - } - sum += score; - } - - let median = if len == 0 { - 0.0 - } else if len % 2 == 0 { - (scores[len / 2 - 1] + scores[len / 2]) / 2.0 - } else { - scores[len / 2] - }; - - set_gauge_entry(&SCORES_BELOW_ZERO_PER_CLIENT, c, below0 as f64 / count); - set_gauge_entry( - &SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT, - c, - below_gossip_threshold as f64 / count, - ); - 
set_gauge_entry( - &SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT, - c, - below_publish_threshold as f64 / count, - ); - set_gauge_entry( - &SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT, - c, - below_greylist_threshold as f64 / count, - ); - - set_gauge_entry(&MIN_SCORES_PER_CLIENT, c, min); - set_gauge_entry(&MEDIAN_SCORES_PER_CLIENT, c, median); - set_gauge_entry(&MEAN_SCORES_PER_CLIENT, c, sum / count); - set_gauge_entry(&MAX_SCORES_PER_CLIENT, c, max); - } - } } pub fn update_sync_metrics(network_globals: &Arc>) { @@ -916,7 +398,8 @@ pub fn update_sync_metrics(network_globals: &Arc>) // count per sync status, the number of connected peers let mut peers_per_sync_type = FnvHashMap::default(); for sync_type in network_globals - .peers() + .peers + .read() .connected_peers() .map(|(_peer_id, info)| info.sync_status().as_str()) { diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 0ab4c742d4..8d639c5ee6 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -154,7 +154,7 @@ impl Router { /// A new RPC request has been received from the network. 
fn handle_rpc_request(&mut self, peer_id: PeerId, id: PeerRequestId, request: Request) { - if !self.network_globals.peers().is_connected(&peer_id) { + if !self.network_globals.peers.read().is_connected(&peer_id) { debug!(self.log, "Dropping request of disconnected peer"; "peer_id" => %peer_id, "request" => ?request); return; } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index d9adcd28c2..35cf3fa90e 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -9,15 +9,18 @@ use crate::{ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use futures::future::OptionFuture; use futures::prelude::*; +use lighthouse_network::{ + open_metrics_client::registry::Registry, MessageAcceptance, Service as LibP2PService, +}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId}, - Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, + Context, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, + Response, Subnet, }; use lighthouse_network::{ types::{GossipEncoding, GossipTopic}, BehaviourEvent, MessageId, NetworkGlobals, PeerId, }; -use lighthouse_network::{MessageAcceptance, Service as LibP2PService, SyncStatus}; use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; @@ -32,7 +35,7 @@ use types::{ mod tests; /// The interval (in seconds) that various network metrics will update. -const METRIC_UPDATE_INTERVAL: u64 = 1; +const METRIC_UPDATE_INTERVAL: u64 = 5; /// Number of slots before the fork when we should subscribe to the new fork topics. const SUBSCRIBE_DELAY_SLOTS: u64 = 2; /// Delay after a fork where we unsubscribe from pre-fork topics. 
@@ -93,6 +96,7 @@ pub enum NetworkMessage { peer_id: PeerId, action: PeerAction, source: ReportSource, + msg: &'static str, }, /// Disconnect an ban a peer, providing a reason. GoodbyePeer { @@ -100,10 +104,6 @@ pub enum NetworkMessage { reason: GoodbyeReason, source: ReportSource, }, - UpdatePeerSyncStatus { - peer_id: PeerId, - sync_status: SyncStatus, - }, } /// Service that handles communication between internal services and the `lighthouse_network` network service. @@ -158,6 +158,7 @@ impl NetworkService { beacon_chain: Arc>, config: &NetworkConfig, executor: task_executor::TaskExecutor, + gossipsub_registry: Option<&'_ mut Registry>, ) -> error::Result<( Arc>, mpsc::UnboundedSender>, @@ -203,16 +204,18 @@ impl NetworkService { debug!(network_log, "Current fork"; "fork_name" => ?fork_context.current_fork()); - // launch libp2p service - let (network_globals, mut libp2p) = LibP2PService::new( - executor.clone(), + // construct the libp2p service context + let service_context = Context { config, enr_fork_id, - &network_log, - fork_context.clone(), - &beacon_chain.spec, - ) - .await?; + fork_context: fork_context.clone(), + chain_spec: &beacon_chain.spec, + gossipsub_registry, + }; + + // launch libp2p service + let (network_globals, mut libp2p) = + LibP2PService::new(executor.clone(), service_context, &network_log).await?; // Repopulate the DHT with stored ENR's if discovery is not disabled. if !config.disable_discovery { @@ -328,21 +331,13 @@ fn spawn_service( // spawn on the current executor executor.spawn(async move { - let mut metric_update_counter = 0; loop { // build the futures to check simultaneously tokio::select! 
{ _ = service.metrics_update.tick(), if service.metrics_enabled => { // update various network metrics - metric_update_counter +=1; - if metric_update_counter % T::EthSpec::default_spec().seconds_per_slot == 0 { - // if a slot has occurred, reset the metrics - let _ = metrics::ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT - .as_ref() - .map(|gauge| gauge.reset()); - } metrics::update_gossip_metrics::( - service.libp2p.swarm.behaviour_mut().gs(), + service.libp2p.swarm.behaviour().gs(), &service.network_globals, ); // update sync metrics @@ -449,10 +444,9 @@ fn spawn_service( "count" => messages.len(), "topics" => ?topic_kinds ); - metrics::expose_publish_metrics(&messages); service.libp2p.swarm.behaviour_mut().publish(messages); } - NetworkMessage::ReportPeer { peer_id, action, source } => service.libp2p.report_peer(&peer_id, action, source), + NetworkMessage::ReportPeer { peer_id, action, source, msg } => service.libp2p.report_peer(&peer_id, action, source, msg), NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source), NetworkMessage::AttestationSubscribe { subscriptions } => { if let Err(e) = service @@ -531,9 +525,6 @@ fn spawn_service( ); } } - NetworkMessage::UpdatePeerSyncStatus{peer_id, sync_status} => { - service.libp2p.swarm.behaviour_mut().update_peers_sync_status(&peer_id, sync_status); - } } } // process any attestation service events @@ -650,9 +641,6 @@ fn spawn_service( message, .. } => { - // Update prometheus metrics. 
- metrics::expose_receive_metrics(&message); - match message { // attestation information gets processed in the attestation service PubsubMessage::Attestation(ref subnet_and_attestation) => { diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 33b190e480..d78b1fe4f8 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -67,9 +67,10 @@ mod tests { // Create a new network service which implicitly gets dropped at the // end of the block. - let _network_service = NetworkService::start(beacon_chain.clone(), &config, executor) - .await - .unwrap(); + let _network_service = + NetworkService::start(beacon_chain.clone(), &config, executor, None) + .await + .unwrap(); drop(signal); }); diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index fa52fddc36..ade490e00e 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use lighthouse_network::rpc::StatusMessage; @@ -25,9 +23,3 @@ impl ToStatusMessage for BeaconChain { }) } } - -impl ToStatusMessage for Arc> { - fn status_message(&self) -> Result { - as ToStatusMessage>::status_message(self) - } -} diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index da0c1fc8c2..581f6b3270 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -5,7 +5,7 @@ use beacon_chain::{ BeaconChain, }; use futures::prelude::*; -use genesis::{generate_deterministic_keypairs, interop_genesis_state}; +use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lazy_static::lazy_static; use lighthouse_network::NetworkConfig; use slog::Logger; @@ -16,8 +16,8 @@ use std::time::{Duration, SystemTime}; use 
store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; use types::{ - CommitteeIndex, Epoch, EthSpec, MainnetEthSpec, Slot, SubnetId, SyncCommitteeSubscription, - SyncSubnetId, ValidatorSubscription, + CommitteeIndex, Epoch, EthSpec, Hash256, MainnetEthSpec, Slot, SubnetId, + SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, }; const SLOT_DURATION_MILLIS: u64 = 400; @@ -52,8 +52,14 @@ impl TestBeaconChain { .custom_spec(spec.clone()) .store(Arc::new(store)) .genesis_state( - interop_genesis_state::(&keypairs, 0, &spec) - .expect("should generate interop state"), + interop_genesis_state::( + &keypairs, + 0, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + &spec, + ) + .expect("should generate interop state"), ) .expect("should build state using recent genesis") .dummy_eth1_backend() diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index b734773a3b..610081319d 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -213,7 +213,14 @@ impl BackFillSync { match self.state() { BackFillState::Syncing => {} // already syncing ignore. BackFillState::Paused => { - if self.network_globals.peers().synced_peers().next().is_some() { + if self + .network_globals + .peers + .read() + .synced_peers() + .next() + .is_some() + { // If there are peers to resume with, begin the resume. debug!(self.log, "Resuming backfill sync"; "start_epoch" => self.current_start, "awaiting_batches" => self.batches.len(), "processing_target" => self.processing_target); self.set_state(BackFillState::Syncing); @@ -534,7 +541,15 @@ impl BackFillSync { // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. 
- self.on_batch_process_result(network, batch_id, &BatchProcessResult::Failed(false)) + self.on_batch_process_result( + network, + batch_id, + &BatchProcessResult::Failed { + imported_blocks: false, + // The beacon processor queue is full, no need to penalize the peer. + peer_action: None, + }, + ) } else { Ok(ProcessResult::Successful) } @@ -614,7 +629,10 @@ impl BackFillSync { self.process_completed_batches(network) } } - BatchProcessResult::Failed(imported_blocks) => { + BatchProcessResult::Failed { + imported_blocks, + peer_action, + } => { let batch = match self.batches.get_mut(&batch_id) { Some(v) => v, None => { @@ -652,12 +670,20 @@ impl BackFillSync { // that it is likely all peers are sending invalid batches // repeatedly and are either malicious or faulty. We stop the backfill sync and // report all synced peers that have participated. - let action = PeerAction::LowToleranceError; - warn!(self.log, "Backfill batch failed to download. Penalizing peers"; - "score_adjustment" => %action, - "batch_epoch"=> batch_id); - for peer in self.participating_peers.drain() { - network.report_peer(peer, action); + warn!( + self.log, + "Backfill batch failed to download. Penalizing peers"; + "score_adjustment" => %peer_action + .as_ref() + .map(ToString::to_string) + .unwrap_or_else(|| "None".into()), + "batch_epoch"=> batch_id + ); + + if let Some(peer_action) = peer_action { + for peer in self.participating_peers.drain() { + network.report_peer(peer, *peer_action, "backfill_batch_failed"); + } } self.fail_sync(BackFillError::BatchProcessingFailed(batch_id)) .map(|_| ProcessResult::Successful) @@ -778,7 +804,11 @@ impl BackFillSync { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "backfill_reprocessed_original_peer", + ); } else { // The same peer corrected it's previous mistake. 
There was an error, so we // negative score the original peer. @@ -787,7 +817,11 @@ impl BackFillSync { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "backfill_reprocessed_same_peer", + ); } } } @@ -899,7 +933,8 @@ impl BackFillSync { let new_peer = { let mut priorized_peers = self .network_globals - .peers() + .peers + .read() .synced_peers() .map(|peer| { ( @@ -1018,7 +1053,8 @@ impl BackFillSync { let mut rng = rand::thread_rng(); let mut idle_peers = self .network_globals - .peers() + .peers + .read() .synced_peers() .filter(|peer_id| { self.active_requests diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 4d353bd7f2..32f2a26367 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -137,7 +137,10 @@ pub enum BatchProcessResult { /// The batch was completed successfully. It carries whether the sent batch contained blocks. Success(bool), /// The batch processing failed. It carries whether the processing imported any block. - Failed(bool), + Failed { + imported_blocks: bool, + peer_action: Option, + }, } /// Maintains a sequential list of parents to lookup and the lookup's current state. @@ -294,7 +297,7 @@ impl SyncManager { let sync_type = remote_sync_type(&local, &remote, &self.chain); // update the state of the peer. 
- let should_add = self.update_peer_sync_state(peer_id, &local, &remote, &sync_type); + let should_add = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type); if matches!(sync_type, PeerSyncType::Advanced) && should_add { self.range_sync @@ -366,8 +369,11 @@ impl SyncManager { } else { crit!(self.log, "Parent chain has no blocks"); } - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "bbroot_failed_chains", + ); return; } // add the block to response @@ -385,8 +391,11 @@ impl SyncManager { // tolerate this behaviour. if !single_block_request.block_returned { warn!(self.log, "Peer didn't respond with a block it referenced"; "referenced_block_hash" => %single_block_request.hash, "peer_id" => %peer_id); - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "bbroot_no_block", + ); } return; } @@ -509,8 +518,11 @@ impl SyncManager { warn!(self.log, "Single block lookup failed"; "outcome" => ?outcome); // This could be a range of errors. But we couldn't process the block. // For now we consider this a mid tolerance error. - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "single_block_lookup_failed", + ); } } } @@ -646,7 +658,7 @@ impl SyncManager { /// connection status. 
fn update_peer_sync_state( &mut self, - peer_id: PeerId, + peer_id: &PeerId, local_sync_info: &SyncInfo, remote_sync_info: &SyncInfo, sync_type: &PeerSyncType, @@ -656,10 +668,15 @@ impl SyncManager { let new_state = sync_type.as_sync_status(remote_sync_info); let rpr = new_state.as_str(); - - if let Some(info) = self.network_globals.peers().peer_info(&peer_id) { - let is_connected = info.is_connected(); - if !info.sync_status().is_same_kind(&new_state) { + // Drop the write lock + let update_sync_status = self + .network_globals + .peers + .write() + .update_sync_status(peer_id, new_state.clone()); + if let Some(was_updated) = update_sync_status { + let is_connected = self.network_globals.peers.read().is_connected(peer_id); + if was_updated { debug!(self.log, "Peer transitioned sync state"; "peer_id" => %peer_id, "new_state" => rpr, "our_head_slot" => local_sync_info.head_slot, "out_finalized_epoch" => local_sync_info.finalized_epoch, "their_head_slot" => remote_sync_info.head_slot, "their_finalized_epoch" => remote_sync_info.finalized_epoch, @@ -670,8 +687,6 @@ impl SyncManager { if new_state.is_synced() { self.backfill_sync.fully_synced_peer_joined(); } - - self.network.update_peer_sync_status(peer_id, new_state); } is_connected } else { @@ -709,7 +724,7 @@ impl SyncManager { let head = self.chain.best_slot().unwrap_or_else(|_| Slot::new(0)); let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0)); - let peers = self.network_globals.peers(); + let peers = self.network_globals.peers.read(); if current_slot >= head && current_slot.sub(head) <= (SLOT_IMPORT_TOLERANCE as u64) && head > 0 @@ -830,8 +845,11 @@ impl SyncManager { self.request_parent(parent_request); // We do not tolerate these kinds of errors. We will accept a few but these are signs // of a faulty peer. 
- self.network - .report_peer(peer, PeerAction::LowToleranceError); + self.network.report_peer( + peer, + PeerAction::LowToleranceError, + "parent_request_bad_hash", + ); } else { // The last block in the queue is the only one that has not attempted to be processed yet. // @@ -901,6 +919,7 @@ impl SyncManager { self.network.report_peer( parent_request.last_submitted_peer, PeerAction::MidToleranceError, + "parent_request_err", ); } } @@ -939,6 +958,7 @@ impl SyncManager { self.network.report_peer( parent_request.last_submitted_peer, PeerAction::LowToleranceError, + "request_parent_import_failed", ); return; // drop the request } @@ -1106,8 +1126,11 @@ impl SyncManager { // A peer sent an object (block or attestation) that referenced a parent. // The processing of this chain failed. self.failed_chains.insert(chain_head); - self.network - .report_peer(peer_id, PeerAction::MidToleranceError); + self.network.report_peer( + peer_id, + PeerAction::MidToleranceError, + "parent_lookup_failed", + ); } } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 895828f5d4..9415f21002 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -10,9 +10,7 @@ use fnv::FnvHashMap; use lighthouse_network::rpc::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, RequestId, }; -use lighthouse_network::{ - Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request, SyncStatus, -}; +use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; use slog::{debug, trace, warn}; use std::sync::Arc; use tokio::sync::mpsc; @@ -54,15 +52,20 @@ impl SyncNetworkContext { /// Returns the Client type of the peer if known pub fn client_type(&self, peer_id: &PeerId) -> Client { - self.network_globals.client(peer_id) + self.network_globals + .peers + .read() + .peer_info(peer_id) + .map(|info| info.client().clone()) + 
.unwrap_or_default() } pub fn status_peers( &mut self, - chain: C, + chain: &C, peers: impl Iterator, ) { - if let Ok(status_message) = &chain.status_message() { + if let Ok(status_message) = chain.status_message() { for peer_id in peers { debug!( self.log, @@ -167,13 +170,14 @@ impl SyncNetworkContext { } /// Reports to the scoring algorithm the behaviour of a peer. - pub fn report_peer(&mut self, peer_id: PeerId, action: PeerAction) { + pub fn report_peer(&mut self, peer_id: PeerId, action: PeerAction, msg: &'static str) { debug!(self.log, "Sync reporting peer"; "peer_id" => %peer_id, "action" => %action); self.network_send .send(NetworkMessage::ReportPeer { peer_id, action, source: ReportSource::SyncService, + msg, }) .unwrap_or_else(|e| { warn!(self.log, "Could not report peer, channel failed"; "error"=> %e); @@ -205,17 +209,10 @@ impl SyncNetworkContext { }); } - pub fn update_peer_sync_status(&self, peer_id: PeerId, new_status: SyncStatus) { - let _ = self.send_network_msg(NetworkMessage::UpdatePeerSyncStatus { - peer_id, - sync_status: new_status, - }); - } - /// Sends an arbitrary network message. 
- fn send_network_msg(&self, msg: NetworkMessage) -> Result<(), &'static str> { - self.network_send.send(msg).map_err(|msg| { - warn!(self.log, "Could not send message to the network service"; "msg" => ?msg.0); + fn send_network_msg(&mut self, msg: NetworkMessage) -> Result<(), &'static str> { + self.network_send.send(msg).map_err(|_| { + debug!(self.log, "Could not send message to the network service"); "Network channel send Failed" }) } diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 70e27b5a0a..e0b15cb498 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,7 +1,6 @@ use crate::sync::RequestId; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::PeerId; -use ssz::Encode; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; @@ -390,7 +389,7 @@ impl Attempt { #[allow(clippy::ptr_arg)] fn new(peer_id: PeerId, blocks: &Vec>) -> Self { let mut hasher = std::collections::hash_map::DefaultHasher::new(); - blocks.as_ssz_bytes().hash(&mut hasher); + blocks.hash(&mut hasher); let hash = hasher.finish(); Attempt { peer_id, hash } } diff --git a/beacon_node/network/src/sync/range_sync/block_storage.rs b/beacon_node/network/src/sync/range_sync/block_storage.rs index 5590ac6234..5f8033bc51 100644 --- a/beacon_node/network/src/sync/range_sync/block_storage.rs +++ b/beacon_node/network/src/sync/range_sync/block_storage.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use beacon_chain::{BeaconChain, BeaconChainTypes}; use types::Hash256; @@ -8,7 +6,7 @@ pub trait BlockStorage { fn is_block_known(&self, block_root: &Hash256) -> bool; } -impl BlockStorage for Arc> { +impl BlockStorage for BeaconChain { fn is_block_known(&self, block_root: &Hash256) -> bool { self.fork_choice.read().contains_block(block_root) } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs 
b/beacon_node/network/src/sync/range_sync/chain.rs index a1acac614e..4474f1cc34 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -313,7 +313,14 @@ impl SyncingChain { // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. - self.on_batch_process_result(network, batch_id, &BatchProcessResult::Failed(false)) + self.on_batch_process_result( + network, + batch_id, + &BatchProcessResult::Failed { + imported_blocks: false, + peer_action: None, + }, + ) } else { Ok(KeepChain) } @@ -488,7 +495,10 @@ impl SyncingChain { self.process_completed_batches(network) } } - BatchProcessResult::Failed(imported_blocks) => { + BatchProcessResult::Failed { + imported_blocks, + peer_action, + } => { let batch = self.batches.get_mut(&batch_id).ok_or_else(|| { RemoveChain::WrongChainState(format!( "Batch not found for current processing target {}", @@ -511,12 +521,20 @@ impl SyncingChain { // report all peers. // There are some edge cases with forks that could land us in this situation. // This should be unlikely, so we tolerate these errors, but not often. - let action = PeerAction::LowToleranceError; - warn!(self.log, "Batch failed to download. Dropping chain scoring peers"; - "score_adjustment" => %action, - "batch_epoch"=> batch_id); - for (peer, _) in self.peers.drain() { - network.report_peer(peer, action); + warn!( + self.log, + "Batch failed to download. 
Dropping chain scoring peers"; + "score_adjustment" => %peer_action + .as_ref() + .map(ToString::to_string) + .unwrap_or_else(|| "None".into()), + "batch_epoch"=> batch_id + ); + + if let Some(peer_action) = peer_action { + for (peer, _) in self.peers.drain() { + network.report_peer(peer, *peer_action, "batch_failed"); + } } Err(RemoveChain::ChainFailed(batch_id)) } else { @@ -606,7 +624,11 @@ impl SyncingChain { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "batch_reprocessed_original_peer", + ); } else { // The same peer corrected it's previous mistake. There was an error, so we // negative score the original peer. @@ -615,7 +637,11 @@ impl SyncingChain { "batch_epoch" => id, "score_adjustment" => %action, "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id ); - network.report_peer(attempt.peer_id, action); + network.report_peer( + attempt.peer_id, + action, + "batch_reprocessed_same_peer", + ); } } } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 4dc9c1d01c..512f7a989a 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -17,6 +17,7 @@ use slog::{crit, debug, error}; use smallvec::SmallVec; use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; use types::{Epoch, Hash256, Slot}; @@ -41,7 +42,7 @@ pub enum RangeSyncState { /// A collection of finalized and head chains currently being processed. pub struct ChainCollection { /// The beacon chain for processing. - beacon_chain: C, + beacon_chain: Arc, /// The set of finalized chains being synced. 
finalized_chains: FnvHashMap>, /// The set of head chains being synced. @@ -53,7 +54,7 @@ pub struct ChainCollection { } impl ChainCollection { - pub fn new(beacon_chain: C, log: slog::Logger) -> Self { + pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { ChainCollection { beacon_chain, finalized_chains: FnvHashMap::default(), @@ -406,6 +407,7 @@ impl ChainCollection { local_info: &SyncInfo, awaiting_head_peers: &mut HashMap, ) { + debug!(self.log, "Purging chains"); let local_finalized_slot = local_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -414,7 +416,10 @@ impl ChainCollection { let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { - target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root) + let is = + target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root); + debug!(log_ref, "Chain is outdated {}", is); + is }; // Retain only head peers that remain relevant @@ -424,31 +429,35 @@ impl ChainCollection { // Remove chains that are out-dated let mut removed_chains = Vec::new(); - self.finalized_chains.retain(|id, chain| { + removed_chains.extend(self.finalized_chains.iter().filter_map(|(id, chain)| { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { debug!(log_ref, "Purging out of finalized chain"; &chain); - removed_chains.push((*id, chain.is_syncing(), RangeSyncType::Finalized)); - false + Some((*id, chain.is_syncing(), RangeSyncType::Finalized)) } else { - true + None } - }); - self.head_chains.retain(|id, chain| { + })); + + removed_chains.extend(self.head_chains.iter().filter_map(|(id, chain)| { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { debug!(log_ref, "Purging out of date head chain"; &chain); - removed_chains.push((*id, chain.is_syncing(), RangeSyncType::Head)); - false + Some((*id, chain.is_syncing(), RangeSyncType::Head)) } else { - true 
+ None } - }); + })); // update the state of the collection for (id, was_syncing, sync_type) in removed_chains { + // remove each chain, updating the state for each removal. + match sync_type { + RangeSyncType::Finalized => self.finalized_chains.remove(&id), + RangeSyncType::Head => self.head_chains.remove(&id), + }; self.on_chain_removed(&id, was_syncing, sync_type); } } diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 2786ef410d..ffe74ea985 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -59,9 +59,9 @@ use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; /// The primary object dealing with long range/batch syncing. This contains all the active and /// non-active chains that need to be processed before the syncing is considered complete. This /// holds the current state of the long range sync. -pub struct RangeSync>> { +pub struct RangeSync> { /// The beacon chain for processing. - beacon_chain: C, + beacon_chain: Arc, /// Last known sync info of our useful connected peers. We use this information to create Head /// chains after all finalized chains have ended. awaiting_head_peers: HashMap, @@ -76,11 +76,11 @@ pub struct RangeSync>> { impl RangeSync where - C: BlockStorage + Clone + ToStatusMessage, + C: BlockStorage + ToStatusMessage, T: BeaconChainTypes, { pub fn new( - beacon_chain: C, + beacon_chain: Arc, beacon_processor_send: mpsc::Sender>, log: slog::Logger, ) -> Self { @@ -125,7 +125,7 @@ where // is OK since we since only one finalized chain at a time. 
// determine which kind of sync to perform and set up the chains - match RangeSyncType::new(&self.beacon_chain, &local_info, &remote_info) { + match RangeSyncType::new(self.beacon_chain.as_ref(), &local_info, &remote_info) { RangeSyncType::Finalized => { // Finalized chain search debug!(self.log, "Finalization sync peer joined"; "peer_id" => %peer_id); @@ -337,7 +337,7 @@ where debug!(self.log, "Chain removed"; "sync_type" => ?sync_type, &chain, "reason" => ?remove_reason, "op" => op); } - network.status_peers(self.beacon_chain.clone(), chain.peers()); + network.status_peers(self.beacon_chain.as_ref(), chain.peers()); let local = match self.beacon_chain.status_message() { Ok(status) => SyncInfo { @@ -371,26 +371,26 @@ mod tests { use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::parking_lot::RwLock; use lighthouse_network::rpc::BlocksByRangeRequest; - use lighthouse_network::{libp2p, Request}; + use lighthouse_network::Request; use lighthouse_network::{rpc::StatusMessage, NetworkGlobals}; use slog::{o, Drain}; use slot_clock::SystemTimeSlotClock; - use std::sync::atomic::AtomicBool; + use std::collections::HashSet; use std::sync::Arc; use store::MemoryStore; use types::{Hash256, MinimalEthSpec as E}; #[derive(Debug)] struct FakeStorage { - is_block_known: AtomicBool, + known_blocks: RwLock>, status: RwLock, } impl Default for FakeStorage { fn default() -> Self { FakeStorage { - is_block_known: AtomicBool::new(false), + known_blocks: RwLock::new(HashSet::new()), status: RwLock::new(StatusMessage { fork_digest: [0; 4], finalized_root: Hash256::zero(), @@ -402,14 +402,24 @@ mod tests { } } - impl BlockStorage for Arc { - fn is_block_known(&self, _block_root: &store::Hash256) -> bool { - self.is_block_known - .load(std::sync::atomic::Ordering::Relaxed) + impl FakeStorage { + fn remember_block(&self, block_root: Hash256) { + self.known_blocks.write().insert(block_root); + } + + #[allow(dead_code)] + fn forget_block(&self, block_root: &Hash256) { + 
self.known_blocks.write().remove(block_root); } } - impl ToStatusMessage for Arc { + impl BlockStorage for FakeStorage { + fn is_block_known(&self, block_root: &store::Hash256) -> bool { + self.known_blocks.read().contains(block_root) + } + } + + impl ToStatusMessage for FakeStorage { fn status_message(&self) -> Result { Ok(self.status.read().clone()) } @@ -446,7 +456,7 @@ mod tests { globals: Arc>, } - impl RangeSync> { + impl RangeSync { fn assert_state(&self, expected_state: RangeSyncType) { assert_eq!( self.state() @@ -456,6 +466,14 @@ mod tests { expected_state ) } + + #[allow(dead_code)] + fn assert_not_syncing(&self) { + assert!( + self.state().expect("State is ok").is_none(), + "Range should not be syncing." + ); + } } impl TestRig { @@ -525,7 +543,7 @@ mod tests { let local_info = self.local_info(); let finalized_root = Hash256::random(); - let finalized_epoch = local_info.finalized_epoch + 1; + let finalized_epoch = local_info.finalized_epoch + 2; let head_slot = finalized_epoch.start_slot(E::slots_per_epoch()); let head_root = Hash256::random(); let remote_info = SyncInfo { @@ -540,39 +558,17 @@ mod tests { } } - fn range(log_enabled: bool) -> (TestRig, RangeSync>) { + fn range(log_enabled: bool) -> (TestRig, RangeSync) { let chain = Arc::new(FakeStorage::default()); let log = build_log(slog::Level::Trace, log_enabled); let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(10); - let range_sync = RangeSync::>::new( + let range_sync = RangeSync::::new( chain.clone(), beacon_processor_tx, log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); - let globals = { - use lighthouse_network::discovery::enr_ext::CombinedKeyExt; - use lighthouse_network::discv5::enr::CombinedKey; - use lighthouse_network::discv5::enr::EnrBuilder; - use lighthouse_network::rpc::methods::{MetaData, MetaDataV2}; - - let keypair = libp2p::identity::Keypair::generate_secp256k1(); - let enr_key: CombinedKey = 
CombinedKey::from_libp2p(&keypair).unwrap(); - let enr = EnrBuilder::new("v4").build(&enr_key).unwrap(); - let globals = NetworkGlobals::new( - enr, - 9000, - 9000, - MetaData::V2(MetaDataV2 { - seq_number: 0, - attnets: Default::default(), - syncnets: Default::default(), - }), - vec![], - &log, - ); - Arc::new(globals) - }; + let globals = Arc::new(NetworkGlobals::new_test_globals(&log)); let cx = SyncNetworkContext::new( network_tx, globals.clone(), @@ -592,7 +588,7 @@ mod tests { #[test] fn head_chain_removed_while_finalized_syncing() { // NOTE: this is a regression test. - let (mut rig, mut range) = range(true); + let (mut rig, mut range) = range(false); // Get a peer with an advanced head let (head_peer, local_info, remote_info) = rig.head_peer(); @@ -614,4 +610,36 @@ mod tests { range.remove_peer(&mut rig.cx, &head_peer); range.assert_state(RangeSyncType::Finalized); } + + #[test] + fn state_update_while_purging() { + // NOTE: this is a regression test. + let (mut rig, mut range) = range(true); + + // Get a peer with an advanced head + let (head_peer, local_info, head_info) = rig.head_peer(); + let head_peer_root = head_info.head_root; + range.add_peer(&mut rig.cx, local_info, head_peer, head_info); + range.assert_state(RangeSyncType::Head); + + // Sync should have requested a batch, grab the request. + let _request = rig.grab_request(&head_peer); + + // Now get a peer with an advanced finalized epoch. + let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); + let finalized_peer_root = remote_info.finalized_root; + range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); + range.assert_state(RangeSyncType::Finalized); + + // Sync should have requested a batch, grab the request + let _second_request = rig.grab_request(&finalized_peer); + + // Now the chain knows both chains target roots. 
+ rig.chain.remember_block(head_peer_root); + rig.chain.remember_block(finalized_peer_root); + + // Add an additional peer to the second chain to make range update it's status + let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); + range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); + } } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index a63e2808f2..449a2f59d7 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -12,7 +12,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } parking_lot = "0.11.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" rayon = "1.5.0" serde = "1.0.116" diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index c756b8a6cb..4c2960c9d6 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -104,6 +104,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network. Multiaddr is also supported.") .takes_value(true), ) + .arg( + Arg::with_name("network-load") + .long("network-load") + .value_name("INTEGER") + .help("Lighthouse's network can be tuned for bandwidth/performance. Setting this to a high value, will increase the bandwidth lighthouse uses, increasing the likelihood of redundant information in exchange for faster communication. This can increase profit of validators marginally by receiving messages faster on the network. Lower values decrease bandwidth usage, but makes communication slower which can lead to validator performance reduction. Values are in the range [1,5].") + .default_value("3") + .set(clap::ArgSettings::Hidden) + .takes_value(true), + ) .arg( Arg::with_name("disable-upnp") .long("disable-upnp") @@ -240,6 +249,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { over TLS. 
Must not be password-protected.") .takes_value(true) ) + .arg( + Arg::with_name("http-allow-sync-stalled") + .long("http-allow-sync-stalled") + .help("Forces the HTTP to indicate that the node is synced when sync is actually \ + stalled. This is useful for very small testnets. TESTING ONLY. DO NOT USE ON \ + MAINNET.") + ) /* Prometheus metrics HTTP server related arguments */ .arg( Arg::with_name("metrics") @@ -371,6 +387,38 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Specifies how many blocks the database should cache in memory [default: 5]") .takes_value(true) ) + /* + * Execution Layer Integration + */ + .arg( + Arg::with_name("merge") + .long("merge") + .help("Enable the features necessary to run merge testnets. This feature \ + is unstable and is for developers only.") + .takes_value(false), + ) + .arg( + Arg::with_name("execution-endpoints") + .long("execution-endpoints") + .value_name("EXECUTION-ENDPOINTS") + .help("One or more comma-delimited server endpoints for HTTP JSON-RPC connection. \ + If multiple endpoints are given the endpoints are used as fallback in the \ + given order. Also enables the --merge flag. \ + If this flag is omitted and the --eth1-endpoints is supplied, those values \ + will be used. Defaults to http://127.0.0.1:8545.") + .takes_value(true) + ) + .arg( + Arg::with_name("fee-recipient") + .long("fee-recipient") + .value_name("FEE-RECIPIENT") + .help("Once the merge has happened, this address will receive transaction fees \ + collected from any blocks produced by this node. Defaults to a junk \ + address whilst the merge is in development stages. THE DEFAULT VALUE \ + WILL BE REMOVED BEFORE THE MERGE ENTERS PRODUCTION") + .requires("merge") + .takes_value(true) + ) /* * Database purging and compaction. @@ -478,12 +526,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("slasher-max-db-size") .long("slasher-max-db-size") .help( - "Maximum size of the LMDB database used by the slasher." 
+ "Maximum size of the MDBX database used by the slasher." ) .value_name("GIGABYTES") .requires("slasher") .takes_value(true) ) + .arg( + Arg::with_name("slasher-att-cache-size") + .long("slasher-att-cache-size") + .help("Set the maximum number of attestation roots for the slasher to cache") + .value_name("COUNT") + .requires("slasher") + .takes_value(true) + ) .arg( Arg::with_name("slasher-chunk-size") .long("slasher-chunk-size") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 52a0932615..2040822931 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,8 +1,8 @@ use clap::ArgMatches; -use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, BAD_TESTNET_DIR_MESSAGE}; +use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; -use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; +use environment::RuntimeContext; use http_api::TlsConfig; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; @@ -14,7 +14,12 @@ use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::net::{TcpListener, UdpSocket}; use std::path::{Path, PathBuf}; use std::str::FromStr; -use types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; +use types::{Address, Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAFFITI_BYTES_LEN}; + +// TODO(merge): remove this default value. It's just there to make life easy during +// early testnets. +const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] = + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; /// Gets the fully-initialized global client. /// @@ -25,9 +30,11 @@ use types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, PublicKeyBytes, GRAF /// response of some remote server. 
pub fn get_config( cli_args: &ArgMatches, - spec: &ChainSpec, - log: Logger, + context: &RuntimeContext, ) -> Result { + let spec = &context.eth2_config.spec; + let log = context.log(); + let mut client_config = ClientConfig { data_dir: get_data_dir(cli_args), ..Default::default() @@ -36,19 +43,17 @@ pub fn get_config( // If necessary, remove any existing database and configuration if client_config.data_dir.exists() && cli_args.is_present("purge-db") { // Remove the chain_db. - let chain_db = client_config.get_db_path().ok_or("Failed to get db_path")?; + let chain_db = client_config.get_db_path(); if chain_db.exists() { fs::remove_dir_all(chain_db) .map_err(|err| format!("Failed to remove chain_db: {}", err))?; } // Remove the freezer db. - let freezer_db = client_config - .get_freezer_db_path() - .ok_or("Failed to get freezer db path")?; + let freezer_db = client_config.get_freezer_db_path(); if freezer_db.exists() { fs::remove_dir_all(freezer_db) - .map_err(|err| format!("Failed to remove chain_db: {}", err))?; + .map_err(|err| format!("Failed to remove freezer_db: {}", err))?; } } @@ -69,7 +74,7 @@ pub fn get_config( &mut client_config.network, cli_args, &client_config.data_dir, - &log, + log, false, )?; @@ -131,6 +136,10 @@ pub fn get_config( }); } + if cli_args.is_present("http-allow-sync-stalled") { + client_config.http_api.allow_sync_stalled = true; + } + /* * Prometheus metrics HTTP server */ @@ -217,7 +226,7 @@ pub fn get_config( client_config.sync_eth1_chain = true; client_config.eth1.endpoints = endpoints .split(',') - .map(|s| SensitiveUrl::parse(s)) + .map(SensitiveUrl::parse) .collect::>() .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?; } @@ -232,6 +241,25 @@ pub fn get_config( client_config.eth1.purge_cache = true; } + if let Some(endpoints) = cli_args.value_of("execution-endpoints") { + client_config.sync_eth1_chain = true; + client_config.execution_endpoints = endpoints + .split(',') + .map(SensitiveUrl::parse) + 
.collect::>() + .map(Some) + .map_err(|e| format!("execution-endpoints contains an invalid URL {:?}", e))?; + } else if cli_args.is_present("merge") { + client_config.execution_endpoints = Some(client_config.eth1.endpoints.clone()); + } + + client_config.suggested_fee_recipient = Some( + clap_utils::parse_optional(cli_args, "fee-recipient")? + // TODO(merge): remove this default value. It's just there to make life easy during + // early testnets. + .unwrap_or_else(|| Address::from(DEFAULT_SUGGESTED_FEE_RECIPIENT)), + ); + if let Some(freezer_dir) = cli_args.value_of("freezer-dir") { client_config.freezer_db_path = Some(PathBuf::from(freezer_dir)); } @@ -284,7 +312,10 @@ pub fn get_config( /* * Load the eth2 network dir to obtain some additional config values. */ - let eth2_network_config = get_eth2_network_config(cli_args)?; + let eth2_network_config = context + .eth2_network_config + .as_ref() + .ok_or("Context is missing eth2 network config")?; client_config.eth1.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); client_config.eth1.deposit_contract_deploy_block = @@ -307,13 +338,16 @@ pub fn get_config( // Only append network config bootnodes if discovery is not disabled if !client_config.network.disable_discovery { - if let Some(mut boot_nodes) = eth2_network_config.boot_enr { - client_config.network.boot_nodes_enr.append(&mut boot_nodes) + if let Some(boot_nodes) = ð2_network_config.boot_enr { + client_config + .network + .boot_nodes_enr + .extend_from_slice(boot_nodes) } } client_config.genesis = if let Some(genesis_state_bytes) = - eth2_network_config.genesis_state_bytes + eth2_network_config.genesis_state_bytes.clone() { // Set up weak subjectivity sync, or start from the hardcoded genesis state. 
if let (Some(initial_state_path), Some(initial_block_path)) = ( @@ -434,6 +468,9 @@ pub fn get_config( }; } + client_config.chain.max_network_size = + lighthouse_network::gossip_max_size(spec.bellatrix_fork_epoch.is_some()); + if cli_args.is_present("slasher") { let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { PathBuf::from(slasher_dir) @@ -473,6 +510,12 @@ pub fn get_config( slasher_config.max_db_size_mbs = max_db_size_gbs * 1024; } + if let Some(attestation_cache_size) = + clap_utils::parse_optional(cli_args, "slasher-att-cache-size")? + { + slasher_config.attestation_root_cache_size = attestation_cache_size; + } + if let Some(chunk_size) = clap_utils::parse_optional(cli_args, "slasher-chunk-size")? { slasher_config.chunk_size = chunk_size; } @@ -583,6 +626,13 @@ pub fn set_network_config( config.discovery_port = port; } + if let Some(value) = cli_args.value_of("network-load") { + let network_load = value + .parse::() + .map_err(|_| format!("Invalid integer: {}", value))?; + config.network_load = network_load; + } + if let Some(boot_enr_str) = cli_args.value_of("boot-nodes") { let mut enrs: Vec = vec![]; let mut multiaddrs: Vec = vec![]; @@ -671,7 +721,7 @@ pub fn set_network_config( None } }) { - addr.push_str(&format!(":{}", enr_udp_port.to_string())); + addr.push_str(&format!(":{}", enr_udp_port)); } else { return Err( "enr-udp-port must be set for node to be discoverable with dns address" @@ -745,20 +795,6 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { .unwrap_or_else(|| PathBuf::from(".")) } -/// Try to parse the eth2 network config from the `network`, `testnet-dir` flags in that order. -/// Returns the default hardcoded testnet if neither flags are set. -pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result { - let optional_network_config = if cli_args.is_present("network") { - clap_utils::parse_hardcoded_network(cli_args, "network")? 
- } else if cli_args.is_present("testnet-dir") { - clap_utils::parse_testnet_dir(cli_args, "testnet-dir")? - } else { - // if neither is present, assume the default network - Eth2NetworkConfig::constant(DEFAULT_HARDCODED_NETWORK)? - }; - optional_network_config.ok_or_else(|| BAD_TESTNET_DIR_MESSAGE.to_string()) -} - /// A bit of hack to find an unused port. /// /// Does not guarantee that the given port is unused after the function exits, just that it was diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index d452e3e463..773a0d2eb1 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -13,7 +13,7 @@ use beacon_chain::{ use clap::ArgMatches; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; -pub use config::{get_config, get_data_dir, get_eth2_network_config, set_network_config}; +pub use config::{get_config, get_data_dir, set_network_config}; use environment::RuntimeContext; pub use eth2_config::Eth2Config; use slasher::Slasher; @@ -46,8 +46,7 @@ impl ProductionBeaconNode { context: RuntimeContext, matches: ArgMatches<'static>, ) -> Result { - let client_config = - get_config::(&matches, &context.eth2_config().spec, context.log().clone())?; + let client_config = get_config::(&matches, &context)?; Self::new(context, client_config).await } @@ -67,6 +66,15 @@ impl ProductionBeaconNode { let freezer_db_path = client_config.create_freezer_db_path()?; let executor = context.executor.clone(); + if let Some(legacy_dir) = client_config.get_existing_legacy_data_dir() { + warn!( + log, + "Legacy datadir location"; + "msg" => "this occurs when using relative paths for a datadir location", + "location" => ?legacy_dir, + ) + } + if !client_config.chain.enable_lock_timeouts { info!(log, "Disabling lock timeouts globally"); TimeoutRwLock::disable_timeouts() @@ -76,7 +84,13 @@ impl ProductionBeaconNode { .runtime_context(context) .chain_spec(spec) .http_api_config(client_config.http_api.clone()) - 
.disk_store(&datadir, &db_path, &freezer_db_path, store_config)?; + .disk_store( + &datadir, + &db_path, + &freezer_db_path, + store_config, + log.clone(), + )?; let builder = if let Some(slasher_config) = client_config.slasher.clone() { let slasher = Arc::new( diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 74b38e2034..e6c52ba91e 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -13,7 +13,7 @@ db-key = "0.0.5" leveldb = { version = "0.8.6", default-features = false } parking_lot = "0.11.0" itertools = "0.10.0" -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } @@ -22,8 +22,8 @@ serde = "1.0.116" serde_derive = "1.0.116" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lru = "0.6.0" -sloggers = "2.0.2" +lru = "0.7.1" +sloggers = { version = "2.1.1", features = ["json"] } directory = { path = "../../common/directory" } tree_hash = "0.4.0" diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs index 7d47e8c99a..8ef0b6d201 100644 --- a/beacon_node/store/src/chunked_iter.rs +++ b/beacon_node/store/src/chunked_iter.rs @@ -1,27 +1,26 @@ use crate::chunked_vector::{chunk_key, Chunk, Field}; use crate::{HotColdDB, ItemStore}; use slog::error; -use std::sync::Arc; use types::{ChainSpec, EthSpec, Slot}; /// Iterator over the values of a `BeaconState` vector field (like `block_roots`). /// /// Uses the freezer DB's separate table to load the values. 
-pub struct ChunkedVectorIter +pub struct ChunkedVectorIter<'a, F, E, Hot, Cold> where F: Field, E: EthSpec, Hot: ItemStore, Cold: ItemStore, { - pub(crate) store: Arc>, + pub(crate) store: &'a HotColdDB, current_vindex: usize, pub(crate) end_vindex: usize, next_cindex: usize, current_chunk: Chunk, } -impl ChunkedVectorIter +impl<'a, F, E, Hot, Cold> ChunkedVectorIter<'a, F, E, Hot, Cold> where F: Field, E: EthSpec, @@ -35,7 +34,7 @@ where /// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can /// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`). pub fn new( - store: Arc>, + store: &'a HotColdDB, start_vindex: usize, last_restore_point_slot: Slot, spec: &ChainSpec, @@ -57,7 +56,7 @@ where } } -impl Iterator for ChunkedVectorIter +impl<'a, F, E, Hot, Cold> Iterator for ChunkedVectorIter<'a, F, E, Hot, Cold> where F: Field, E: EthSpec, diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 34829f9e32..48c3e92289 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -2,6 +2,7 @@ use crate::chunked_vector::ChunkError; use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use ssz::DecodeError; +use state_processing::BlockReplayError; use types::{BeaconStateError, Hash256, Slot}; #[cfg(feature = "milhouse")] @@ -42,6 +43,7 @@ pub enum Error { expected: Hash256, computed: Hash256, }, + BlockReplayError(BlockReplayError), #[cfg(feature = "milhouse")] MilhouseError(milhouse::Error), } @@ -103,6 +105,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: BlockReplayError) -> Error { + Error::BlockReplayError(e) + } +} + #[derive(Debug)] pub struct DBError { pub message: String, diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 5a77863d54..353be6bf05 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -1,74 
+1,33 @@ use crate::chunked_iter::ChunkedVectorIter; -use crate::chunked_vector::{BlockRoots, StateRoots}; +use crate::chunked_vector::{BlockRoots, Field, StateRoots}; use crate::errors::{Error, Result}; use crate::iter::{BlockRootsIterator, StateRootsIterator}; use crate::{HotColdDB, ItemStore}; use itertools::process_results; -use std::sync::Arc; use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot}; -/// Forwards block roots iterator that makes use of the `block_roots` table in the freezer DB. -pub struct FrozenForwardsBlockRootsIterator, Cold: ItemStore> { - inner: ChunkedVectorIter, -} +pub type HybridForwardsBlockRootsIterator<'a, E, Hot, Cold> = + HybridForwardsIterator<'a, E, BlockRoots, Hot, Cold>; +pub type HybridForwardsStateRootsIterator<'a, E, Hot, Cold> = + HybridForwardsIterator<'a, E, StateRoots, Hot, Cold>; -/// Forwards block roots iterator that reverses a backwards iterator (only good for short ranges). -pub struct SimpleForwardsBlockRootsIterator { - // Values from the backwards iterator (in slot descending order) - values: Vec<(Hash256, Slot)>, -} - -/// Fusion of the above two approaches to forwards iteration. Fast and efficient. -pub enum HybridForwardsBlockRootsIterator, Cold: ItemStore> { - PreFinalization { - iter: Box>, - /// Data required by the `PostFinalization` iterator when we get to it. - continuation_data: Box, Hash256)>>, - }, - PostFinalization { - iter: SimpleForwardsBlockRootsIterator, - }, -} - -impl, Cold: ItemStore> - FrozenForwardsBlockRootsIterator -{ - pub fn new( - store: Arc>, +/// Trait unifying `BlockRoots` and `StateRoots` for forward iteration. 
+pub trait Root: Field { + fn simple_forwards_iterator, Cold: ItemStore>( + store: &HotColdDB, start_slot: Slot, - last_restore_point_slot: Slot, - spec: &ChainSpec, - ) -> Self { - Self { - inner: ChunkedVectorIter::new( - store, - start_slot.as_usize(), - last_restore_point_slot, - spec, - ), - } - } + end_state: BeaconState, + end_root: Hash256, + ) -> Result; } -impl, Cold: ItemStore> Iterator - for FrozenForwardsBlockRootsIterator -{ - type Item = (Hash256, Slot); - - fn next(&mut self) -> Option { - self.inner - .next() - .map(|(slot, block_hash)| (block_hash, Slot::from(slot))) - } -} - -impl SimpleForwardsBlockRootsIterator { - pub fn new, Cold: ItemStore>( - store: Arc>, +impl Root for BlockRoots { + fn simple_forwards_iterator, Cold: ItemStore>( + store: &HotColdDB, start_slot: Slot, end_state: BeaconState, end_block_root: Hash256, - ) -> Result { + ) -> Result { // Iterate backwards from the end state, stopping at the start slot. let values = process_results( std::iter::once(Ok((end_block_root, end_state.slot()))) @@ -78,129 +37,41 @@ impl SimpleForwardsBlockRootsIterator { .collect::>() }, )?; - Ok(Self { values }) + Ok(SimpleForwardsIterator { values }) } } -impl Iterator for SimpleForwardsBlockRootsIterator { - type Item = Result<(Hash256, Slot)>; - - fn next(&mut self) -> Option { - // Pop from the end of the vector to get the block roots in slot-ascending order. 
- Ok(self.values.pop()).transpose() - } -} - -impl, Cold: ItemStore> - HybridForwardsBlockRootsIterator -{ - pub fn new( - store: Arc>, +impl Root for StateRoots { + fn simple_forwards_iterator, Cold: ItemStore>( + store: &HotColdDB, start_slot: Slot, end_state: BeaconState, - end_block_root: Hash256, - spec: &ChainSpec, - ) -> Result { - use HybridForwardsBlockRootsIterator::*; - - let latest_restore_point_slot = store.get_latest_restore_point_slot(); - - let result = if start_slot < latest_restore_point_slot { - PreFinalization { - iter: Box::new(FrozenForwardsBlockRootsIterator::new( - store, - start_slot, - latest_restore_point_slot, - spec, - )), - continuation_data: Box::new(Some((end_state, end_block_root))), - } - } else { - PostFinalization { - iter: SimpleForwardsBlockRootsIterator::new( - store, - start_slot, - end_state, - end_block_root, - )?, - } - }; - - Ok(result) - } - - fn do_next(&mut self) -> Result> { - use HybridForwardsBlockRootsIterator::*; - - match self { - PreFinalization { - iter, - continuation_data, - } => { - match iter.next() { - Some(x) => Ok(Some(x)), - // Once the pre-finalization iterator is consumed, transition - // to a post-finalization iterator beginning from the last slot - // of the pre iterator. - None => { - let (end_state, end_block_root) = - continuation_data.take().ok_or(Error::NoContinuationData)?; - - *self = PostFinalization { - iter: SimpleForwardsBlockRootsIterator::new( - iter.inner.store.clone(), - Slot::from(iter.inner.end_vindex), - end_state, - end_block_root, - )?, - }; - self.do_next() - } - } - } - PostFinalization { iter } => iter.next().transpose(), - } + end_state_root: Hash256, + ) -> Result { + // Iterate backwards from the end state, stopping at the start slot. 
+ let values = process_results( + std::iter::once(Ok((end_state_root, end_state.slot()))) + .chain(StateRootsIterator::owned(store, end_state)), + |iter| { + iter.take_while(|(_, slot)| *slot >= start_slot) + .collect::>() + }, + )?; + Ok(SimpleForwardsIterator { values }) } } -impl, Cold: ItemStore> Iterator - for HybridForwardsBlockRootsIterator +/// Forwards root iterator that makes use of a flat field table in the freezer DB. +pub struct FrozenForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> { - type Item = Result<(Hash256, Slot)>; - - fn next(&mut self) -> Option { - self.do_next().transpose() - } + inner: ChunkedVectorIter<'a, F, E, Hot, Cold>, } -/// Forwards state roots iterator that makes use of the `state_roots` table in the freezer DB. -pub struct FrozenForwardsStateRootsIterator, Cold: ItemStore> { - inner: ChunkedVectorIter, -} - -/// Forwards state roots iterator that reverses a backwards iterator (only good for short ranges). -pub struct SimpleForwardsStateRootsIterator { - // Values from the backwards iterator (in slot descending order) - values: Vec<(Hash256, Slot)>, -} - -/// Fusion of the above two approaches to forwards iteration. Fast and efficient. -pub enum HybridForwardsStateRootsIterator, Cold: ItemStore> { - PreFinalization { - iter: Box>, - /// Data required by the `PostFinalization` iterator when we get to it. 
- continuation_data: Box, Hash256)>>, - }, - PostFinalization { - iter: SimpleForwardsStateRootsIterator, - }, -} - -impl, Cold: ItemStore> - FrozenForwardsStateRootsIterator +impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> + FrozenForwardsIterator<'a, E, F, Hot, Cold> { pub fn new( - store: Arc>, + store: &'a HotColdDB, start_slot: Slot, last_restore_point_slot: Slot, spec: &ChainSpec, @@ -216,39 +87,25 @@ impl, Cold: ItemStore> } } -impl, Cold: ItemStore> Iterator - for FrozenForwardsStateRootsIterator +impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> Iterator + for FrozenForwardsIterator<'a, E, F, Hot, Cold> { type Item = (Hash256, Slot); fn next(&mut self) -> Option { self.inner .next() - .map(|(slot, state_hash)| (state_hash, Slot::from(slot))) + .map(|(slot, root)| (root, Slot::from(slot))) } } -impl SimpleForwardsStateRootsIterator { - pub fn new, Cold: ItemStore>( - store: Arc>, - start_slot: Slot, - end_state: BeaconState, - end_state_root: Hash256, - ) -> Result { - // Iterate backwards from the end state, stopping at the start slot. - let values = process_results( - std::iter::once(Ok((end_state_root, end_state.slot()))) - .chain(StateRootsIterator::owned(store, end_state)), - |iter| { - iter.take_while(|(_, slot)| *slot >= start_slot) - .collect::>() - }, - )?; - Ok(Self { values }) - } +/// Forwards root iterator that reverses a backwards iterator (only good for short ranges). +pub struct SimpleForwardsIterator { + // Values from the backwards iterator (in slot descending order) + values: Vec<(Hash256, Slot)>, } -impl Iterator for SimpleForwardsStateRootsIterator { +impl Iterator for SimpleForwardsIterator { type Item = Result<(Hash256, Slot)>; fn next(&mut self) -> Option { @@ -257,38 +114,75 @@ impl Iterator for SimpleForwardsStateRootsIterator { } } -impl, Cold: ItemStore> - HybridForwardsStateRootsIterator -{ - pub fn new( - store: Arc>, +/// Fusion of the above two approaches to forwards iteration. Fast and efficient. 
+pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> { + PreFinalization { + iter: Box>, + /// Data required by the `PostFinalization` iterator when we get to it. + continuation_data: Option, Hash256)>>, + }, + PostFinalizationLazy { + continuation_data: Option, Hash256)>>, + store: &'a HotColdDB, start_slot: Slot, - end_state: BeaconState, - end_state_root: Hash256, + }, + PostFinalization { + iter: SimpleForwardsIterator, + }, +} + +impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> + HybridForwardsIterator<'a, E, F, Hot, Cold> +{ + /// Construct a new hybrid iterator. + /// + /// The `get_state` closure should return a beacon state and final block/state root to backtrack + /// from in the case where the iterated range does not lie entirely within the frozen portion of + /// the database. If an `end_slot` is provided and it is before the database's latest restore + /// point slot then the `get_state` closure will not be called at all. + /// + /// It is OK for `get_state` to hold a lock while this function is evaluated, as the returned + /// iterator is as lazy as possible and won't do any work apart from calling `get_state`. + /// + /// Conversely, if `get_state` does extensive work (e.g. loading data from disk) then this + /// function may block for some time while `get_state` runs. + pub fn new( + store: &'a HotColdDB, + start_slot: Slot, + end_slot: Option, + get_state: impl FnOnce() -> (BeaconState, Hash256), spec: &ChainSpec, ) -> Result { - use HybridForwardsStateRootsIterator::*; + use HybridForwardsIterator::*; let latest_restore_point_slot = store.get_latest_restore_point_slot(); let result = if start_slot < latest_restore_point_slot { + let iter = Box::new(FrozenForwardsIterator::new( + store, + start_slot, + latest_restore_point_slot, + spec, + )); + + // No continuation data is needed if the forwards iterator plans to halt before + // `end_slot`. 
If it tries to continue further a `NoContinuationData` error will be + // returned. + let continuation_data = + if end_slot.map_or(false, |end_slot| end_slot < latest_restore_point_slot) { + None + } else { + Some(Box::new(get_state())) + }; PreFinalization { - iter: Box::new(FrozenForwardsStateRootsIterator::new( - store, - start_slot, - latest_restore_point_slot, - spec, - )), - continuation_data: Box::new(Some((end_state, end_state_root))), + iter, + continuation_data, } } else { - PostFinalization { - iter: SimpleForwardsStateRootsIterator::new( - store, - start_slot, - end_state, - end_state_root, - )?, + PostFinalizationLazy { + continuation_data: Some(Box::new(get_state())), + store, + start_slot, } }; @@ -296,7 +190,7 @@ impl, Cold: ItemStore> } fn do_next(&mut self) -> Result> { - use HybridForwardsStateRootsIterator::*; + use HybridForwardsIterator::*; match self { PreFinalization { @@ -309,28 +203,39 @@ impl, Cold: ItemStore> // to a post-finalization iterator beginning from the last slot // of the pre iterator. 
None => { - let (end_state, end_state_root) = - continuation_data.take().ok_or(Error::NoContinuationData)?; + let continuation_data = continuation_data.take(); + let store = iter.inner.store; + let start_slot = Slot::from(iter.inner.end_vindex); - *self = PostFinalization { - iter: SimpleForwardsStateRootsIterator::new( - iter.inner.store.clone(), - Slot::from(iter.inner.end_vindex), - end_state, - end_state_root, - )?, + *self = PostFinalizationLazy { + continuation_data, + store, + start_slot, }; + self.do_next() } } } + PostFinalizationLazy { + continuation_data, + store, + start_slot, + } => { + let (end_state, end_root) = + *continuation_data.take().ok_or(Error::NoContinuationData)?; + *self = PostFinalization { + iter: F::simple_forwards_iterator(store, *start_slot, end_state, end_root)?, + }; + self.do_next() + } PostFinalization { iter } => iter.next().transpose(), } } } -impl, Cold: ItemStore> Iterator - for HybridForwardsStateRootsIterator +impl<'a, E: EthSpec, F: Root, Hot: ItemStore, Cold: ItemStore> Iterator + for HybridForwardsIterator<'a, E, F, Hot, Cold> { type Item = Result<(Hash256, Slot)>; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 0194544c80..62441ce0f2 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -22,12 +22,11 @@ use leveldb::iterator::LevelDBIterator; use lru::LruCache; use parking_lot::{Mutex, RwLock}; use serde_derive::{Deserialize, Serialize}; -use slog::{debug, error, info, trace, Logger}; +use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ - per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, - SlotProcessingError, + BlockProcessingError, BlockReplayer, SlotProcessingError, StateRootStrategy, }; use std::cmp::min; use std::convert::TryInto; @@ -37,16 +36,6 @@ use std::sync::Arc; use std::time::Duration; 
use types::*; -/// Defines how blocks should be replayed on states. -#[derive(PartialEq)] -pub enum BlockReplay { - /// Perform all transitions faithfully to the specification. - Accurate, - /// Don't compute state roots, eventually computing an invalid beacon state that can only be - /// used for obtaining shuffling. - InconsistentStateRoots, -} - /// On-disk database that stores finalized states efficiently. /// /// Stores vector fields like the `block_roots` and `state_roots` separately, and only stores @@ -373,10 +362,10 @@ impl, Cold: ItemStore> HotColdDB // chain. This way we avoid returning a state that doesn't match `state_root`. self.load_cold_state(state_root) } else { - self.load_hot_state(state_root, BlockReplay::Accurate) + self.load_hot_state(state_root, StateRootStrategy::Accurate) } } else { - match self.load_hot_state(state_root, BlockReplay::Accurate)? { + match self.load_hot_state(state_root, StateRootStrategy::Accurate)? { Some(state) => Ok(Some(state)), None => self.load_cold_state(state_root), } @@ -414,7 +403,7 @@ impl, Cold: ItemStore> HotColdDB } .into()) } else { - self.load_hot_state(state_root, BlockReplay::InconsistentStateRoots) + self.load_hot_state(state_root, StateRootStrategy::Inconsistent) } } @@ -439,23 +428,55 @@ impl, Cold: ItemStore> HotColdDB } pub fn forwards_block_roots_iterator( - store: Arc, + &self, start_slot: Slot, end_state: BeaconState, end_block_root: Hash256, spec: &ChainSpec, - ) -> Result>, Error> { - HybridForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root, spec) + ) -> Result> + '_, Error> { + HybridForwardsBlockRootsIterator::new( + self, + start_slot, + None, + || (end_state, end_block_root), + spec, + ) + } + + pub fn forwards_block_roots_iterator_until( + &self, + start_slot: Slot, + end_slot: Slot, + get_state: impl FnOnce() -> (BeaconState, Hash256), + spec: &ChainSpec, + ) -> Result, Error> { + HybridForwardsBlockRootsIterator::new(self, start_slot, Some(end_slot), get_state, 
spec) } pub fn forwards_state_roots_iterator( - store: Arc, + &self, start_slot: Slot, end_state_root: Hash256, end_state: BeaconState, spec: &ChainSpec, - ) -> Result>, Error> { - HybridForwardsStateRootsIterator::new(store, start_slot, end_state, end_state_root, spec) + ) -> Result> + '_, Error> { + HybridForwardsStateRootsIterator::new( + self, + start_slot, + None, + || (end_state, end_state_root), + spec, + ) + } + + pub fn forwards_state_roots_iterator_until( + &self, + start_slot: Slot, + end_slot: Slot, + get_state: impl FnOnce() -> (BeaconState, Hash256), + spec: &ChainSpec, + ) -> Result, Error> { + HybridForwardsStateRootsIterator::new(self, start_slot, Some(end_slot), get_state, spec) } /// Load an epoch boundary state by using the hot state summary look-up. @@ -472,10 +493,10 @@ impl, Cold: ItemStore> HotColdDB { // NOTE: minor inefficiency here because we load an unnecessary hot state summary // - // `BlockReplay` should be irrelevant here since we never replay blocks for an epoch + // `StateRootStrategy` should be irrelevant here since we never replay blocks for an epoch // boundary state in the hot DB. let state = self - .load_hot_state(&epoch_boundary_state_root, BlockReplay::Accurate)? + .load_hot_state(&epoch_boundary_state_root, StateRootStrategy::Accurate)? .ok_or(HotColdDBError::MissingEpochBoundaryState( epoch_boundary_state_root, ))?; @@ -620,7 +641,7 @@ impl, Cold: ItemStore> HotColdDB pub fn load_hot_state( &self, state_root: &Hash256, - block_replay: BlockReplay, + state_root_strategy: StateRootStrategy, ) -> Result>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); @@ -648,7 +669,13 @@ impl, Cold: ItemStore> HotColdDB } else { let blocks = self.load_blocks_to_replay(boundary_state.slot(), slot, latest_block_root)?; - self.replay_blocks(boundary_state, blocks, slot, block_replay)? + self.replay_blocks( + boundary_state, + blocks, + slot, + no_state_root_iter(), + state_root_strategy, + )? 
}; Ok(Some(state)) @@ -777,7 +804,22 @@ impl, Cold: ItemStore> HotColdDB )?; // 3. Replay the blocks on top of the low restore point. - self.replay_blocks(low_restore_point, blocks, slot, BlockReplay::Accurate) + // Use a forwards state root iterator to avoid doing any tree hashing. + // The state root of the high restore point should never be used, so is safely set to 0. + let state_root_iter = self.forwards_state_roots_iterator_until( + low_restore_point.slot(), + slot, + || (high_restore_point, Hash256::zero()), + &self.spec, + )?; + + self.replay_blocks( + low_restore_point, + blocks, + slot, + Some(state_root_iter), + StateRootStrategy::Accurate, + ) } /// Get the restore point with the given index, or if it is out of bounds, the split state. @@ -860,85 +902,35 @@ impl, Cold: ItemStore> HotColdDB /// to have any caches built, beyond those immediately required by block processing. fn replay_blocks( &self, - mut state: BeaconState, - mut blocks: Vec>, + state: BeaconState, + blocks: Vec>, target_slot: Slot, - block_replay: BlockReplay, + state_root_iter: Option>>, + state_root_strategy: StateRootStrategy, ) -> Result, Error> { - if block_replay == BlockReplay::InconsistentStateRoots { - for i in 0..blocks.len() { - let prev_block_root = if i > 0 { - blocks[i - 1].canonical_root() - } else { - // Not read. 
- Hash256::zero() - }; + let mut block_replayer = BlockReplayer::new(state, &self.spec) + .state_root_strategy(state_root_strategy) + .no_signature_verification() + .minimal_block_root_verification(); - let (state_root, parent_root) = match &mut blocks[i] { - SignedBeaconBlock::Base(block) => ( - &mut block.message.state_root, - &mut block.message.parent_root, - ), - SignedBeaconBlock::Altair(block) => ( - &mut block.message.state_root, - &mut block.message.parent_root, - ), - }; + let have_state_root_iterator = state_root_iter.is_some(); + if let Some(state_root_iter) = state_root_iter { + block_replayer = block_replayer.state_root_iter(state_root_iter); + } - *state_root = Hash256::zero(); - if i > 0 { - *parent_root = prev_block_root; + block_replayer + .apply_blocks(blocks, Some(target_slot)) + .map(|block_replayer| { + if have_state_root_iterator && block_replayer.state_root_miss() { + warn!( + self.log, + "State root iterator miss"; + "slot" => target_slot, + ); } - } - } - let state_root_from_prev_block = |i: usize, state: &BeaconState| { - if i > 0 { - let prev_block = blocks[i - 1].message(); - if prev_block.slot() == state.slot() { - Some(prev_block.state_root()) - } else { - None - } - } else { - None - } - }; - - for (i, block) in blocks.iter().enumerate() { - if block.slot() <= state.slot() { - continue; - } - - while state.slot() < block.slot() { - let state_root = match block_replay { - BlockReplay::Accurate => state_root_from_prev_block(i, &state), - BlockReplay::InconsistentStateRoots => Some(Hash256::zero()), - }; - per_slot_processing(&mut state, state_root, &self.spec) - .map_err(HotColdDBError::BlockReplaySlotError)?; - } - - per_block_processing( - &mut state, - block, - None, - BlockSignatureStrategy::NoVerification, - &self.spec, - ) - .map_err(HotColdDBError::BlockReplayBlockError)?; - } - - while state.slot() < target_slot { - let state_root = match block_replay { - BlockReplay::Accurate => state_root_from_prev_block(blocks.len(), &state), 
- BlockReplay::InconsistentStateRoots => Some(Hash256::zero()), - }; - per_slot_processing(&mut state, state_root, &self.spec) - .map_err(HotColdDBError::BlockReplaySlotError)?; - } - - Ok(state) + block_replayer.into_state() + }) } /// Fetch a copy of the current split slot from memory. @@ -971,6 +963,21 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.put(&SCHEMA_VERSION_KEY, &schema_version) } + /// Store the database schema version atomically with additional operations. + pub fn store_schema_version_atomically( + &self, + schema_version: SchemaVersion, + mut ops: Vec, + ) -> Result<(), Error> { + let column = SchemaVersion::db_column().into(); + let key = SCHEMA_VERSION_KEY.as_bytes(); + let db_key = get_key_for_col(column, key); + let op = KeyValueStoreOp::PutKeyValue(db_key, schema_version.as_store_bytes()); + ops.push(op); + + self.hot_db.do_atomically(ops) + } + /// Initialise the anchor info for checkpoint sync starting from `block`. pub fn init_anchor_info(&self, block: BeaconBlockRef<'_, E>) -> Result { let anchor_slot = block.slot(); @@ -1290,7 +1297,7 @@ pub fn migrate_database, Cold: ItemStore>( // 1. Copy all of the states between the head and the split slot, from the hot DB // to the cold DB. - let state_root_iter = StateRootsIterator::new(store.clone(), frozen_head); + let state_root_iter = StateRootsIterator::new(&store, frozen_head); for maybe_pair in state_root_iter.take_while(|result| match result { Ok((_, slot)) => { slot >= ¤t_split_slot @@ -1404,6 +1411,11 @@ impl StoreItem for Split { } } +/// Type hint. +fn no_state_root_iter() -> Option>> { + None +} + /// Struct for summarising a state in the hot database. /// /// Allows full reconstruction by replaying blocks. 
diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 8b11a6cc9c..d5448de983 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -2,7 +2,6 @@ use crate::errors::HandleUnavailable; use crate::{Error, HotColdDB, ItemStore}; use std::borrow::Cow; use std::marker::PhantomData; -use std::sync::Arc; use types::{ typenum::Unsigned, BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock, Slot, }; @@ -13,19 +12,19 @@ use types::{ /// /// It is assumed that all ancestors for this object are stored in the database. If this is not the /// case, the iterator will start returning `None` prior to genesis. -pub trait AncestorIter, Cold: ItemStore, I: Iterator> { +pub trait AncestorIter<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore, I: Iterator> { /// Returns an iterator over the roots of the ancestors of `self`. - fn try_iter_ancestor_roots(&self, store: Arc>) -> Option; + fn try_iter_ancestor_roots(&self, store: &'a HotColdDB) -> Option; } impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> - AncestorIter> for SignedBeaconBlock + AncestorIter<'a, E, Hot, Cold, BlockRootsIterator<'a, E, Hot, Cold>> for SignedBeaconBlock { /// Iterates across all available prior block roots of `self`, starting at the most recent and ending /// at genesis. fn try_iter_ancestor_roots( &self, - store: Arc>, + store: &'a HotColdDB, ) -> Option> { let state = store .get_state(&self.message().state_root(), Some(self.slot())) @@ -36,13 +35,13 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> } impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> - AncestorIter> for BeaconState + AncestorIter<'a, E, Hot, Cold, StateRootsIterator<'a, E, Hot, Cold>> for BeaconState { /// Iterates across all available prior state roots of `self`, starting at the most recent and ending /// at genesis. fn try_iter_ancestor_roots( &self, - store: Arc>, + store: &'a HotColdDB, ) -> Option> { // The `self.clone()` here is wasteful. 
Some(StateRootsIterator::owned(store, self.clone())) @@ -64,13 +63,13 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone } impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> StateRootsIterator<'a, T, Hot, Cold> { - pub fn new(store: Arc>, beacon_state: &'a BeaconState) -> Self { + pub fn new(store: &'a HotColdDB, beacon_state: &'a BeaconState) -> Self { Self { inner: RootsIterator::new(store, beacon_state), } } - pub fn owned(store: Arc>, beacon_state: BeaconState) -> Self { + pub fn owned(store: &'a HotColdDB, beacon_state: BeaconState) -> Self { Self { inner: RootsIterator::owned(store, beacon_state), } @@ -113,18 +112,27 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockRootsIterator<'a, T, Hot, Cold> { /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn new(store: Arc>, beacon_state: &'a BeaconState) -> Self { + pub fn new(store: &'a HotColdDB, beacon_state: &'a BeaconState) -> Self { Self { inner: RootsIterator::new(store, beacon_state), } } /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn owned(store: Arc>, beacon_state: BeaconState) -> Self { + pub fn owned(store: &'a HotColdDB, beacon_state: BeaconState) -> Self { Self { inner: RootsIterator::owned(store, beacon_state), } } + + pub fn from_block( + store: &'a HotColdDB, + block_hash: Hash256, + ) -> Result { + Ok(Self { + inner: RootsIterator::from_block(store, block_hash)?, + }) + } } impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator @@ -141,7 +149,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator /// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`. 
pub struct RootsIterator<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> { - store: Arc>, + store: &'a HotColdDB, beacon_state: Cow<'a, BeaconState>, slot: Slot, } @@ -151,7 +159,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone { fn clone(&self) -> Self { Self { - store: self.store.clone(), + store: self.store, beacon_state: self.beacon_state.clone(), slot: self.slot, } @@ -159,7 +167,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> Clone } impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, Hot, Cold> { - pub fn new(store: Arc>, beacon_state: &'a BeaconState) -> Self { + pub fn new(store: &'a HotColdDB, beacon_state: &'a BeaconState) -> Self { Self { store, slot: beacon_state.slot(), @@ -167,7 +175,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, } } - pub fn owned(store: Arc>, beacon_state: BeaconState) -> Self { + pub fn owned(store: &'a HotColdDB, beacon_state: BeaconState) -> Self { Self { store, slot: beacon_state.slot(), @@ -176,7 +184,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, } pub fn from_block( - store: Arc>, + store: &'a HotColdDB, block_hash: Hash256, ) -> Result { let block = store @@ -301,14 +309,14 @@ pub struct BlockIterator<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockIterator<'a, T, Hot, Cold> { /// Create a new iterator over all blocks in the given `beacon_state` and prior states. - pub fn new(store: Arc>, beacon_state: &'a BeaconState) -> Self { + pub fn new(store: &'a HotColdDB, beacon_state: &'a BeaconState) -> Self { Self { roots: BlockRootsIterator::new(store, beacon_state), } } /// Create a new iterator over all blocks in the given `beacon_state` and prior states. 
- pub fn owned(store: Arc>, beacon_state: BeaconState) -> Self { + pub fn owned(store: &'a HotColdDB, beacon_state: BeaconState) -> Self { Self { roots: BlockRootsIterator::owned(store, beacon_state), } @@ -388,9 +396,8 @@ mod test { #[test] fn block_root_iter() { let log = NullLoggerBuilder.build().unwrap(); - let store = Arc::new( - HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(), - ); + let store = + HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); @@ -413,7 +420,7 @@ mod test { state_b.state_roots_mut()[0] = state_a_root; store.put_state(&state_a_root, &state_a).unwrap(); - let iter = BlockRootsIterator::new(store, &state_b); + let iter = BlockRootsIterator::new(&store, &state_b); assert!( iter.clone() @@ -436,9 +443,8 @@ mod test { #[test] fn state_root_iter() { let log = NullLoggerBuilder.build().unwrap(); - let store = Arc::new( - HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(), - ); + let store = + HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let mut state_a: BeaconState = get_state(); @@ -466,7 +472,7 @@ mod test { store.put_state(&state_a_root, &state_a).unwrap(); store.put_state(&state_b_root, &state_b).unwrap(); - let iter = StateRootsIterator::new(store, &state_b); + let iter = StateRootsIterator::new(&store, &state_b); assert!( iter.clone() diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index c86a01213c..8d1993f461 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -30,7 +30,7 @@ pub mod iter; pub use self::chunk_writer::ChunkWriter; pub use self::config::StoreConfig; -pub use self::hot_cold_store::{BlockReplay, HotColdDB, HotStateSummary, Split}; +pub use 
self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; pub use self::partial_beacon_state::PartialBeaconState; diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index fd20a58801..78c02a02e1 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(5); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(8); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index eacff1b192..7bddca9440 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -14,8 +14,8 @@ use types::*; /// /// Utilises lazy-loading from separate storage for its vector fields. 
#[superstruct( - variants(Base, Altair), - variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode),) + variants(Base, Altair, Merge), + variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] #[ssz(enum_behaviour = "transparent")] @@ -66,9 +66,9 @@ where pub current_epoch_attestations: VList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub current_epoch_participation: VariableList, // Finality @@ -78,14 +78,18 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub next_sync_committee: Arc>, + + // Execution + #[superstruct(only(Merge))] + pub latest_execution_payload_header: ExecutionPayloadHeader, } /// Implement the conversion function from BeaconState -> PartialBeaconState. 
@@ -160,6 +164,20 @@ impl PartialBeaconState { inactivity_scores ] ), + BeaconState::Merge(s) => impl_from_state_forgetful!( + s, + outer, + Merge, + PartialBeaconStateMerge, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), } } @@ -335,6 +353,19 @@ impl TryInto> for PartialBeaconState { inactivity_scores ] ), + PartialBeaconState::Merge(inner) => impl_try_into_beacon_state!( + inner, + Merge, + BeaconStateMerge, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), }; Ok(state) } diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index a88af95c85..6b808974e7 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -3,7 +3,9 @@ use crate::hot_cold_store::{HotColdDB, HotColdDBError}; use crate::{Error, ItemStore, KeyValueStore}; use itertools::{process_results, Itertools}; use slog::info; -use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy}; +use state_processing::{ + per_block_processing, per_slot_processing, BlockSignatureStrategy, VerifyBlockRoot, +}; use std::sync::Arc; use types::{EthSpec, Hash256}; @@ -48,8 +50,7 @@ where // Use a dummy root, as we never read the block for the upper limit state. 
let upper_limit_block_root = Hash256::repeat_byte(0xff); - let block_root_iter = Self::forwards_block_roots_iterator( - self.clone(), + let block_root_iter = self.forwards_block_roots_iterator( lower_limit_slot, upper_limit_state, upper_limit_block_root, @@ -91,6 +92,7 @@ where &block, Some(block_root), BlockSignatureStrategy::NoVerification, + VerifyBlockRoot::True, &self.spec, ) .map_err(HotColdDBError::BlockReplayBlockError)?; diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 93cec12401..7552d42306 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -10,6 +10,7 @@ * [Build from Source](./installation-source.md) * [Raspberry Pi 4](./pi.md) * [Cross-Compiling](./cross-compiling.md) + * [Homebrew](./homebrew.md) * [Key Management](./key-management.md) * [Create a wallet](./wallet-create.md) * [Create a validator](./validator-create.md) diff --git a/book/src/advanced-datadir.md b/book/src/advanced-datadir.md index 3b95bc9d76..9f81112bdd 100644 --- a/book/src/advanced-datadir.md +++ b/book/src/advanced-datadir.md @@ -13,3 +13,47 @@ lighthouse --network mainnet --datadir /var/lib/my-custom-dir vc ``` The first step creates a `validators` directory under `/var/lib/my-custom-dir` which contains the imported keys and [`validator_definitions.yml`](./validator-management.md). After that, we simply run the beacon chain and validator client with the custom dir path. 
+ +### Relative Paths + +[#2682]: https://github.com/sigp/lighthouse/pull/2682 +[#2846]: https://github.com/sigp/lighthouse/pull/2846 + +Prior to the introduction of [#2682][] and [#2846][] (releases v2.0.1 and earlier), Lighthouse would +not correctly parse relative paths from the `lighthouse bn --datadir` flag. + +If the user provided a relative path (e.g., `--datadir here` or `--datadir ./here`), the `beacon` +directory would be split across two paths: + +1. `~/here` (in the *home directory*), containing: + - `chain_db` + - `freezer_db` +1. `./here` (in the *present working directory*), containing: + - `logs` + - `network` + +All versions released after the fix ([#2846][]) will default to storing all files in the present +working directory (i.e. `./here`). New users need not be concerned with the old behaviour. + +For existing users which already have a split data directory, a backwards compatibility feature will +be applied. On start-up, if a split directory scenario is detected (i.e. `~/here` exists), +Lighthouse will continue to operate with split directories. In such a scenario, the following +harmless log will show: + +``` +WARN Legacy datadir location location: "/home/user/datadir/beacon", msg: this occurs when using relative paths for a datadir location +``` + +In this case, the user could solve this warn by following these steps: + +1. Stopping the BN process +1. Consolidating the legacy directory with the new one: + - `mv /home/user/datadir/beacon/* $(pwd)/datadir/beacon` + - Where `$(pwd)` is the present working directory for the Lighthouse binary +1. Removing the legacy directory: + - `rm -r /home/user/datadir/beacon` +1. Restarting the BN process + +Although there are no known issues with using backwards compatibility functionality, having split +directories is likely to cause confusion for users. Therefore, we recommend affected users migrate +to a consolidated directory structure. 
diff --git a/book/src/advanced-pre-releases.md b/book/src/advanced-pre-releases.md index 3d1b14d1b1..b90bd631d4 100644 --- a/book/src/advanced-pre-releases.md +++ b/book/src/advanced-pre-releases.md @@ -1,40 +1,4 @@ # Pre-Releases -[sigp/lighthouse]: https://github.com/sigp/lighthouse -[sigp/lighthouse/releases/latest]: https://github.com/sigp/lighthouse/releases/latest -[sigp/lighthouse/releases]: https://github.com/sigp/lighthouse/releases -[`v1.4.0-rc.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0-rc.0 -[`v1.4.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0 - -From time-to-time, Lighthouse *pre-releases* will be published on the [sigp/lighthouse] repository. -These releases have passed the usual automated testing, however the developers would like to see it -running "in the wild" in a variety of configurations before declaring it an official, stable -release. Pre-releases are also used by developers to get feedback from users regarding the -ergonomics of new features or changes. - -Github will clearly show such releases as a "Pre-release" and they *will not* show up on -[sigp/lighthouse/releases/latest]. However, pre-releases *will* show up on the -[sigp/lighthouse/releases] page, so **please pay attention to avoid the pre-releases when you're -looking for stable Lighthouse**. - -### Examples - -[`v1.4.0-rc.0`] has `rc` (release candidate) in the version string and is therefore a pre-release. This -release is *not* stable and is *not* intended for critical tasks on mainnet (e.g., staking). - -However, [`v1.4.0`] is considered stable since it is not marked as a pre-release and does not -contain `rc` in the version string. This release is intended for use on mainnet. - -## When to use a pre-release - -Users may wish to try a pre-release for the following reasons: - -- To preview new features before they are officially released. -- To help detect bugs and regressions before they reach production. 
-- To provide feedback on annoyances before they make it into a release and become harder to change or revert. - -## When *not* to use a pre-release - -It is not recommended to use pre-releases for any critical tasks on mainnet (e.g., staking). To test -critical features, try one of the testnets (e.g., Prater). - +Pre-releases are now referred to as [Release Candidates](./advanced-release-candidates.md). The terms may +be used interchangeably. diff --git a/book/src/advanced-release-candidates.md b/book/src/advanced-release-candidates.md new file mode 100644 index 0000000000..842bc48404 --- /dev/null +++ b/book/src/advanced-release-candidates.md @@ -0,0 +1,43 @@ +# Release Candidates + +[sigp/lighthouse]: https://github.com/sigp/lighthouse +[sigp/lighthouse/releases/latest]: https://github.com/sigp/lighthouse/releases/latest +[sigp/lighthouse/releases]: https://github.com/sigp/lighthouse/releases +[`v1.4.0-rc.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0-rc.0 +[`v1.4.0`]: https://github.com/sigp/lighthouse/releases/tag/v1.4.0 + +From time-to-time, Lighthouse *release candidates* will be published on the [sigp/lighthouse] +repository. These releases have passed the usual automated testing, however the developers would +like to see it running "in the wild" in a variety of configurations before declaring it an official, +stable release. Release candidates are also used by developers to get feedback from users regarding the +ergonomics of new features or changes. + +Github will clearly show such releases as a "Pre-release" and they *will not* show up on +[sigp/lighthouse/releases/latest]. However, release candidates *will* show up on the +[sigp/lighthouse/releases] page, so **please pay attention to avoid the release candidates when +you're looking for stable Lighthouse**. + +From time to time, Lighthouse may use the terms "release candidate" and "pre release" +interchangeably. A pre release is identical to a release candidate.
+ +### Examples + +[`v1.4.0-rc.0`] has `rc` in the version string and is therefore a release candidate. This release is +*not* stable and is *not* intended for critical tasks on mainnet (e.g., staking). + +However, [`v1.4.0`] is considered stable since it is not marked as a release candidate and does not +contain `rc` in the version string. This release is intended for use on mainnet. + +## When to use a release candidate + +Users may wish to try a release candidate for the following reasons: + +- To preview new features before they are officially released. +- To help detect bugs and regressions before they reach production. +- To provide feedback on annoyances before they make it into a release and become harder to change or revert. + +## When *not* to use a release candidate + +It is not recommended to use release candidates for any critical tasks on mainnet (e.g., staking). +To test critical features, try one of the testnets (e.g., Prater). + diff --git a/book/src/faq.md b/book/src/faq.md index edd580a531..419f95dcbd 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -55,14 +55,10 @@ voting period the validator might have to wait ~3.4 hours for next voting period. In times of very, very severe network issues, the network may even fail to vote in new Eth1 blocks, stopping all new validator deposits! -> Note: you can see the list of validators included in the beacon chain using -> our REST API: [/beacon/validators/all](./http/beacon.md#beaconvalidatorsall) - #### 2. Waiting for a validator to be activated If a validator has provided an invalid public key or signature, they will -_never_ be activated or even show up in -[/beacon/validators/all](./http/beacon.html#beaconvalidatorsall). +_never_ be activated. They will simply be forgotten by the beacon chain! 
But, if those parameters were correct, once the Eth1 delays have elapsed and the validator appears in the beacon chain, there's _another_ delay before the validator becomes "active" @@ -133,17 +129,17 @@ same `datadir` as a previous network. I.e if you have been running the boot-up). If you find yourself with a low peer count and is not reaching the target you -expect. Try setting up the correct port forwards as described in `3.` above. +expect. Try setting up the correct port forwards as described [here](./advanced_networking.md#nat-traversal-port-forwarding). ### What should I do if I lose my slashing protection database? -See [here.](./slashing-protection.md#misplaced-slashing-database) +See [here](./slashing-protection.md#misplaced-slashing-database). ### How do I update lighthouse? If you are updating to new release binaries, it will be the same process as described [here.](./installation-binaries.md) -If you are updating by rebuilding from source, see [here.](./installation-source.md#updating-lighthouse) +If you are updating by rebuilding from source, see [here.](./installation-source.md#update-lighthouse) If you are running the docker image provided by Sigma Prime on Dockerhub, you can update to specific versions, for example: diff --git a/book/src/homebrew.md b/book/src/homebrew.md new file mode 100644 index 0000000000..317dc0e0fa --- /dev/null +++ b/book/src/homebrew.md @@ -0,0 +1,36 @@ +# Homebrew package + +Lighthouse is available on Linux and macOS via the [Homebrew package manager](https://brew.sh). + +Please note that this installation method is maintained by the Homebrew community. +It is not officially supported by the Lighthouse team. 
+ +### Installation + +Install the latest version of the [`lighthouse`][formula] formula with: + +```bash +brew install lighthouse +``` + +### Usage + +If Homebrew is installed to your `PATH` (default), simply run: + +```bash +lighthouse --help +``` + +Alternatively, you can find the `lighthouse` binary at: + +```bash +"$(brew --prefix)/bin/lighthouse" --help +``` + +### Maintenance + +The [formula][] is kept up-to-date by the Homebrew community and a bot that listens for new releases. + +The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/lighthouse.rb) repo. + + [formula]: https://formulae.brew.sh/formula/lighthouse diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 864e647ad7..4b977f5222 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -1,85 +1,107 @@ -# Installation: Build from Source +# Build from Source -Lighthouse builds on Linux, macOS, and Windows (native Windows support in -BETA, we also support Windows via [WSL][]). +Lighthouse builds on Linux, macOS, and Windows. Install the [Dependencies](#dependencies) using +the instructions below, and then proceed to [Building Lighthouse](#build-lighthouse). -Compilation should be easy. In fact, if you already have Rust and the build -dependencies installed, all you need is: +## Dependencies -- `git clone https://github.com/sigp/lighthouse.git` -- `cd lighthouse` -- `git checkout stable` -- `make` +First, **install Rust** using [rustup](https://rustup.rs/). The rustup installer provides an easy way +to update the Rust compiler, and works on all platforms. -If this doesn't work or is not clear enough, see the [Detailed -Instructions](#detailed-instructions) below. If you have further issues, see -[Troubleshooting](#troubleshooting). If you'd prefer to use Docker, see the -[Docker Guide](./docker.md).
+With Rust installed, follow the instructions below to install dependencies relevant to your +operating system. -## Updating lighthouse +#### Ubuntu + +Install the following packages: + +```bash +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang +``` + +#### macOS + +1. Install the [Homebrew][] package manager. +1. Install CMake using Homebrew: + +``` +brew install cmake +``` + +[Homebrew]: https://brew.sh/ + +#### Windows + +1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). +1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. +1. Install Make, CMake and LLVM using Chocolatey: + +``` +choco install make +``` + +``` +choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' +``` + +``` +choco install llvm +``` + +These dependencies are for compiling Lighthouse natively on Windows, which is currently in beta +testing. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. +If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies +(Ubuntu)](#ubuntu) section. + +[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about + +## Build Lighthouse + +Once you have Rust and the build dependencies you're ready to build Lighthouse: + +``` +git clone https://github.com/sigp/lighthouse.git +``` + +``` +cd lighthouse +``` + +``` +git checkout stable +``` + +``` +make +``` + +Compilation may take around 10 minutes. Installation was successful if `lighthouse --help` displays +the command-line documentation. + +If you run into any issues, please check the [Troubleshooting](#troubleshooting) section, or reach +out to us on [Discord](https://discord.gg/cyAszAh). + +## Update Lighthouse You can update Lighthouse to a specific version by running the commands below. The `lighthouse` directory will be the location you cloned Lighthouse to during the installation process. 
`${VERSION}` will be the version you wish to build in the format `vX.X.X`. -- `cd lighthouse` -- `git fetch` -- `git checkout ${VERSION}` -- `make` - - -## Detailed Instructions - -1. Install the build dependencies for your platform - - Check the [Dependencies](#dependencies) section for additional - information. -1. Clone the Lighthouse repository. - - Run `$ git clone https://github.com/sigp/lighthouse.git` - - Change into the newly created directory with `$ cd lighthouse` -1. Build Lighthouse with `$ make`. -1. Installation was successful if `$ lighthouse --help` displays the command-line documentation. - -> First time compilation may take several minutes. If you experience any -> failures, please reach out on [discord](https://discord.gg/cyAszAh) or -> [create an issue](https://github.com/sigp/lighthouse/issues/new). - - -## Dependencies - -#### Installing Rust - -The best way to install Rust (regardless of platform) is usually with [rustup](https://rustup.rs/) -- Use the `stable` toolchain (it's the default). - -#### Windows Support - -These instructions are for compiling or running Lighthouse natively on Windows, which is currently in -BETA testing. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. -If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the -[Dependencies (Ubuntu)](#ubuntu) section. - -[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about - -1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) -1. Install [Chocolatey](https://chocolatey.org/install) Package Manager for Windows - - Install `make` via `choco install make` - - Install `cmake` via `choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System'` - -#### Ubuntu - -Several dependencies may be required to compile Lighthouse. 
The following -packages may be required in addition a base Ubuntu Server installation: - -```bash -sudo apt install -y git gcc g++ make cmake pkg-config +``` +cd lighthouse ``` -#### macOS +``` +git fetch +``` -You will need `cmake`. You can install via homebrew: - - brew install cmake +``` +git checkout ${VERSION} +``` +``` +make +``` ## Troubleshooting @@ -93,12 +115,12 @@ See ["Configuring the `PATH` environment variable" ### Compilation error -Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `$ rustup update`. +Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `rustup update`. If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of memory during compilation. If you are on a resource-constrained device you can -look into [cross compilation](./cross-compiling.md). +look into [cross compilation](./cross-compiling.md), or use a [pre-built +binary](./installation-binaries.md). If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`. -[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about diff --git a/book/src/installation.md b/book/src/installation.md index 009bfc00c0..38fbe6b780 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -8,6 +8,10 @@ There are three core methods to obtain the Lighthouse application: - [Docker images](./docker.md). - [Building from source](./installation-source.md). +The community maintains additional installation methods (currently only one). + +- [Homebrew package](./homebrew.md). + Additionally, there are two extra guides for specific uses: - [Rapsberry Pi 4 guide](./pi.md). 
diff --git a/book/src/intro.md b/book/src/intro.md index d3a95c8631..b31deeef88 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -20,7 +20,7 @@ You may read this book from start to finish, or jump to some of these topics: - Follow the [Installation Guide](./installation.md) to install Lighthouse. - Learn about [becoming a mainnet validator](./mainnet-validator.md). - Get hacking with the [Development Environment Guide](./setup.md). -- Utilize the whole stack by starting a [local testnet](./local-testnets.md). +- Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets). - Query the [RESTful HTTP API](./api.md) using `curl`. diff --git a/book/src/pi.md b/book/src/pi.md index 6bc274c9a3..24796d394e 100644 --- a/book/src/pi.md +++ b/book/src/pi.md @@ -22,7 +22,7 @@ terminal and an Internet connection are necessary. ### 2. Install Packages -Install the [Ubuntu Dependencies](installation.md#dependencies-ubuntu). +Install the [Ubuntu Dependencies](installation-source.md#ubuntu). (I.e., run the `sudo apt install ...` command at that link). > Tips: diff --git a/book/src/redundancy.md b/book/src/redundancy.md index a50e324374..b01a01dd26 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -86,7 +86,7 @@ now processing, validating, aggregating and forwarding *all* attestations, whereas previously it was likely only doing a fraction of this work. Without these flags, subscription to attestation subnets and aggregation of attestations is only performed for validators which [explicitly request -subscriptions](subscribe-api). +subscriptions][subscribe-api]. There are 64 subnets and each validator will result in a subscription to *at least* one subnet. So, using the two aforementioned flags will result in diff --git a/book/src/slasher.md b/book/src/slasher.md index 126573c556..05107238c3 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -12,7 +12,6 @@ of the immaturity of the slasher UX and the extra resources required. 
* Quad-core CPU * 16 GB RAM * 256 GB solid state storage (in addition to space for the beacon node DB) -* ⚠️ **If you are running natively on Windows**: LMDB will pre-allocate the entire 256 GB for the slasher database ## How to Run @@ -66,24 +65,29 @@ changed after initialization. * Argument: maximum size of the database in gigabytes * Default: 256 GB -The slasher uses LMDB as its backing store, and LMDB will consume up to the maximum amount of disk -space allocated to it. By default the limit is set to accomodate the default history length and -around 150K validators but you can set it lower if running with a reduced history length. The space -required scales approximately linearly in validator count and history length, i.e. if you halve -either you can halve the space required. +The slasher uses MDBX as its backing store, which places a hard limit on the size of the database +file. You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after +initialization if the limit is reached. -If you want a better estimate you can use this formula: +By default the limit is set to accommodate the default history length and around 300K validators but +you can set it lower if running with a reduced history length. The space required scales +approximately linearly in validator count and history length, i.e. if you halve either you can halve +the space required. + +If you want an estimate of the database size you can use this formula: ``` -360 * V * N + (16 * V * N)/(C * K) + 15000 * N +4.56 GB * (N / 256) * (V / 250000) ``` -where +where `V` is the validator count and `N` is the history length. -* `V` is the validator count -* `N` is the history length -* `C` is the chunk size -* `K` is the validator chunk size +You should set the maximum size higher than the estimate to allow room for growth in the validator +count. + +> NOTE: In Lighthouse v2.1.0 the slasher database was switched from LMDB to MDBX.
Unlike LMDB, MDBX +> does garbage collection of free pages and is capable of shrinking the database file and preventing +> it from growing indefinitely. ### Update Period @@ -138,6 +142,19 @@ about [how the slasher works][design-notes], and/or reading the source code. [design-notes]: https://hackmd.io/@sproul/min-max-slasher +### Attestation Root Cache Size + +* Flag: `--slasher-att-cache-size COUNT` +* Argument: number of attestations +* Default: 100,000 + +The number of attestation data roots to cache in memory. The cache is an LRU cache used to map +indexed attestation IDs to the tree hash roots of their attestation data. The cache prevents reading +whole indexed attestations from disk to determine whether they are slashable. + +Each value is very small (38 bytes) so the entire cache should fit in around 4 MB of RAM. Decreasing +the cache size is not recommended, and the size is set so as to be large enough for future growth. + ### Short-Range Example If you would like to run a lightweight slasher that just checks blocks and attestations within diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index 72e2e379c7..67e17fecad 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -98,7 +98,7 @@ curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/global" -H Returns a per-validator summary of how that validator performed during the current epoch. -The [Global Votes](#consensusglobal_votes) endpoint is the summation of all of these +The [Global Votes](#global) endpoint is the summation of all of these individual values, please see it for definitions of terms like "current_epoch", "previous_epoch" and "target_attester". 
diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 520cb06391..2d596aad6e 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.0.1" +version = "2.1.0" authors = ["Sigma Prime "] edition = "2018" @@ -10,7 +10,7 @@ clap = "2.33.3" clap_utils = { path = "../common/clap_utils" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } types = { path = "../consensus/types" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" slog = "2.5.2" tokio = "1.14.0" log = "0.4.11" @@ -23,3 +23,4 @@ hex = "0.4.2" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.66" +eth2_network_config = { path = "../common/eth2_network_config" } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 704cbb2a82..4df7a5f235 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -1,5 +1,6 @@ -use beacon_node::{get_data_dir, get_eth2_network_config, set_network_config}; +use beacon_node::{get_data_dir, set_network_config}; use clap::ArgMatches; +use eth2_network_config::Eth2NetworkConfig; use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr}; use lighthouse_network::{ discovery::{create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr}, @@ -7,7 +8,6 @@ use lighthouse_network::{ }; use serde_derive::{Deserialize, Serialize}; use ssz::Encode; -use std::convert::TryFrom; use std::net::SocketAddr; use std::{marker::PhantomData, path::PathBuf}; use types::EthSpec; @@ -23,15 +23,13 @@ pub struct BootNodeConfig { phantom: PhantomData, } -impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { - type Error = String; - - fn try_from(matches: &ArgMatches<'_>) -> Result { +impl BootNodeConfig { + pub fn new( + matches: &ArgMatches<'_>, + eth2_network_config: &Eth2NetworkConfig, + ) -> Result { let data_dir = get_data_dir(matches); - // Try and grab network config from input CLI params - let eth2_network_config = get_eth2_network_config(matches)?; - // Try and obtain 
bootnodes let boot_nodes = { @@ -134,13 +132,15 @@ impl TryFrom<&ArgMatches<'_>> for BootNodeConfig { /// The set of configuration parameters that can safely be (de)serialized. /// -/// Its fields are a subset of the fields of `BootNodeConfig`. +/// Its fields are a subset of the fields of `BootNodeConfig`, some of them are copied from `Discv5Config`. #[derive(Serialize, Deserialize)] pub struct BootNodeConfigSerialization { pub listen_socket: SocketAddr, // TODO: Generalise to multiaddr pub boot_nodes: Vec, pub local_enr: Enr, + pub disable_packet_filter: bool, + pub enable_enr_auto_update: bool, } impl BootNodeConfigSerialization { @@ -152,7 +152,7 @@ impl BootNodeConfigSerialization { boot_nodes, local_enr, local_key: _, - discv5_config: _, + discv5_config, phantom: _, } = config; @@ -160,6 +160,8 @@ impl BootNodeConfigSerialization { listen_socket: *listen_socket, boot_nodes: boot_nodes.clone(), local_enr: local_enr.clone(), + disable_packet_filter: !discv5_config.enable_packet_filter, + enable_enr_auto_update: discv5_config.enr_update, } } } diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index ed3a5655b3..2afc063808 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -2,7 +2,7 @@ use clap::ArgMatches; use slog::{o, Drain, Level, Logger}; -use std::convert::TryFrom; +use eth2_network_config::Eth2NetworkConfig; use std::fs::File; use std::path::PathBuf; mod cli; @@ -19,6 +19,7 @@ pub fn run( lh_matches: &ArgMatches<'_>, bn_matches: &ArgMatches<'_>, eth_spec_id: EthSpecId, + eth2_network_config: &Eth2NetworkConfig, debug_level: String, ) { let debug_level = match debug_level.as_str() { @@ -56,8 +57,12 @@ pub fn run( let log = slog_scope::logger(); // Run the main function emitting any errors if let Err(e) = match eth_spec_id { - EthSpecId::Minimal => main::(lh_matches, bn_matches, log), - EthSpecId::Mainnet => main::(lh_matches, bn_matches, log), + EthSpecId::Minimal => { + main::(lh_matches, bn_matches, eth2_network_config, log) + } + 
EthSpecId::Mainnet => { + main::(lh_matches, bn_matches, eth2_network_config, log) + } } { slog::crit!(slog_scope::logger(), "{}", e); } @@ -66,6 +71,7 @@ pub fn run( fn main( lh_matches: &ArgMatches<'_>, bn_matches: &ArgMatches<'_>, + eth2_network_config: &Eth2NetworkConfig, log: slog::Logger, ) -> Result<(), String> { // Builds a custom executor for the bootnode @@ -74,8 +80,8 @@ fn main( .build() .map_err(|e| format!("Failed to build runtime: {}", e))?; - // Parse the CLI args into a useable config - let config: BootNodeConfig = BootNodeConfig::try_from(bn_matches)?; + // parse the CLI args into a useable config + let config: BootNodeConfig = BootNodeConfig::new(bn_matches, eth2_network_config)?; // Dump config if `dump-config` flag is set let dump_config = clap_utils::parse_optional::(lh_matches, "dump-config")?; diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 9db525683b..542a13ad4e 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -11,4 +11,5 @@ clap = "2.33.3" hex = "0.4.2" dirs = "3.0.1" eth2_network_config = { path = "../eth2_network_config" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" +ethereum-types = "0.12.1" diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index dc82cbe669..f8c6e8b7ce 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,7 +1,8 @@ //! A helper library for parsing values from `clap::ArgMatches`. use clap::ArgMatches; -use eth2_network_config::Eth2NetworkConfig; +use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; +use ethereum_types::U256 as Uint256; use ssz::Decode; use std::path::PathBuf; use std::str::FromStr; @@ -13,6 +14,47 @@ pub const BAD_TESTNET_DIR_MESSAGE: &str = "The hard-coded testnet directory was or when there is no default public network to connect to. 
\ During these times you must specify a --testnet-dir."; +/// Try to parse the eth2 network config from the `network`, `testnet-dir` flags in that order. +/// Returns the default hardcoded testnet if neither flags are set. +pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result { + let optional_network_config = if cli_args.is_present("network") { + parse_hardcoded_network(cli_args, "network")? + } else if cli_args.is_present("testnet-dir") { + parse_testnet_dir(cli_args, "testnet-dir")? + } else { + // if neither is present, assume the default network + Eth2NetworkConfig::constant(DEFAULT_HARDCODED_NETWORK)? + }; + + let mut eth2_network_config = + optional_network_config.ok_or_else(|| BAD_TESTNET_DIR_MESSAGE.to_string())?; + + if let Some(string) = parse_optional::(cli_args, "terminal-total-difficulty-override")? + { + let stripped = string.replace(",", ""); + let terminal_total_difficulty = Uint256::from_dec_str(&stripped).map_err(|e| { + format!( + "Could not parse --terminal-total-difficulty-override as decimal value: {:?}", + e + ) + })?; + + eth2_network_config.config.terminal_total_difficulty = terminal_total_difficulty; + } + + if let Some(hash) = parse_optional(cli_args, "terminal-block-hash-override")? { + eth2_network_config.config.terminal_block_hash = hash; + } + + if let Some(epoch) = parse_optional(cli_args, "terminal-block-hash-epoch-override")? { + eth2_network_config + .config + .terminal_block_hash_activation_epoch = epoch; + } + + Ok(eth2_network_config) +} + /// Attempts to load the testnet dir at the path if `name` is in `matches`, returning an error if /// the path cannot be found or the testnet dir is invalid. 
pub fn parse_testnet_dir( diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 4746d570bb..e1f0579a40 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -14,6 +14,6 @@ hex = "0.4.2" [dependencies] types = { path = "../../consensus/types"} -eth2_ssz = "0.4.0" -tree_hash = "0.4.0" +eth2_ssz = "0.4.1" +tree_hash = "0.4.1" ethabi = "12.0.0" diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs index aeb781d7a4..62b98aab94 100644 --- a/common/directory/src/lib.rs +++ b/common/directory/src/lib.rs @@ -40,6 +40,19 @@ pub fn ensure_dir_exists>(path: P) -> Result<(), String> { Ok(()) } +/// If `arg` is in `matches`, parses the value as a path. +/// +/// Otherwise, attempts to find the default directory for the `testnet` from the `matches`. +pub fn parse_path_or_default(matches: &ArgMatches, arg: &'static str) -> Result { + clap_utils::parse_path_with_default_in_home_dir( + matches, + arg, + PathBuf::new() + .join(DEFAULT_ROOT_DIR) + .join(get_network_dir(matches)), + ) +} + /// If `arg` is in `matches`, parses the value as a path. 
/// /// Otherwise, attempts to find the default directory for the `testnet` from the `matches` diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 8997499735..f1c9f5061e 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -13,14 +13,14 @@ types = { path = "../../consensus/types" } reqwest = { version = "0.11.0", features = ["json","stream"] } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } proto_array = { path = "../../consensus/proto_array", optional = true } -eth2_serde_utils = "0.1.0" +eth2_serde_utils = "0.1.1" eth2_keystore = { path = "../../crypto/eth2_keystore" } libsecp256k1 = "0.6.0" ring = "0.16.19" bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } sensitive_url = { path = "../../common/sensitive_url" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" futures-util = "0.3.8" futures = "0.3.8" diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index bdad672866..153667d7e9 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1256,8 +1256,12 @@ impl BeaconNodeHttpClient { .push("attester") .push(&epoch.to_string()); - self.post_with_timeout_and_response(path, &indices, self.timeouts.attester_duties) - .await + self.post_with_timeout_and_response( + path, + &ValidatorIndexDataRef(indices), + self.timeouts.attester_duties, + ) + .await } /// `POST validator/aggregate_and_proofs` @@ -1356,8 +1360,12 @@ impl BeaconNodeHttpClient { .push("sync") .push(&epoch.to_string()); - self.post_with_timeout_and_response(path, &indices, self.timeouts.sync_duties) - .await + self.post_with_timeout_and_response( + path, + &ValidatorIndexDataRef(indices), + self.timeouts.sync_duties, + ) + .await } } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 42131b49cc..a761b9ed12 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -428,10 +428,13 @@ pub struct AttestationPoolQuery { pub committee_index: Option, } 
-#[derive(Deserialize)] +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] pub struct ValidatorsQuery { - pub id: Option>, - pub status: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub id: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub status: Option>, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -520,33 +523,81 @@ pub struct SyncingData { #[derive(Clone, PartialEq, Debug, Deserialize)] #[serde(try_from = "String", bound = "T: FromStr")] -pub struct QueryVec(pub Vec); +pub struct QueryVec { + values: Vec, +} + +fn query_vec<'de, D, T>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, + T: FromStr, +{ + let vec: Vec> = Deserialize::deserialize(deserializer)?; + Ok(Vec::from(QueryVec::from(vec))) +} + +fn option_query_vec<'de, D, T>(deserializer: D) -> Result>, D::Error> +where + D: serde::Deserializer<'de>, + T: FromStr, +{ + let vec: Vec> = Deserialize::deserialize(deserializer)?; + if vec.is_empty() { + return Ok(None); + } + + Ok(Some(Vec::from(QueryVec::from(vec)))) +} + +impl From>> for QueryVec { + fn from(vecs: Vec>) -> Self { + Self { + values: vecs.into_iter().flat_map(|qv| qv.values).collect(), + } + } +} impl TryFrom for QueryVec { type Error = String; fn try_from(string: String) -> Result { if string.is_empty() { - return Ok(Self(vec![])); + return Ok(Self { values: vec![] }); } - string - .split(',') - .map(|s| s.parse().map_err(|_| "unable to parse".to_string())) - .collect::, String>>() - .map(Self) + Ok(Self { + values: string + .split(',') + .map(|s| s.parse().map_err(|_| "unable to parse query".to_string())) + .collect::, String>>()?, + }) + } +} + +impl From> for Vec { + fn from(vec: QueryVec) -> Vec { + vec.values } } #[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] pub struct ValidatorBalancesQuery { - pub id: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub id: Option>, } #[derive(Clone, Serialize, 
Deserialize)] #[serde(transparent)] pub struct ValidatorIndexData(#[serde(with = "eth2_serde_utils::quoted_u64_vec")] pub Vec); +/// Borrowed variant of `ValidatorIndexData`, for serializing/sending. +#[derive(Clone, Copy, Serialize)] +#[serde(transparent)] +pub struct ValidatorIndexDataRef<'a>( + #[serde(serialize_with = "eth2_serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], +); + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AttesterData { pub pubkey: PublicKeyBytes, @@ -602,9 +653,12 @@ pub struct BeaconCommitteeSubscription { } #[derive(Deserialize)] +#[serde(deny_unknown_fields)] pub struct PeersQuery { - pub state: Option>, - pub direction: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub state: Option>, + #[serde(default, deserialize_with = "option_query_vec")] + pub direction: Option>, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -777,7 +831,7 @@ pub struct SseLateHead { #[derive(PartialEq, Debug, Serialize, Clone)] #[serde(bound = "T: EthSpec", untagged)] pub enum EventKind { - Attestation(Attestation), + Attestation(Box>), Block(SseBlock), FinalizedCheckpoint(SseFinalizedCheckpoint), Head(SseHead), @@ -858,8 +912,10 @@ impl EventKind { } #[derive(Clone, Deserialize)] +#[serde(deny_unknown_fields)] pub struct EventQuery { - pub topics: QueryVec, + #[serde(deserialize_with = "query_vec")] + pub topics: Vec, } #[derive(Debug, Clone, Copy, PartialEq, Deserialize)] @@ -961,7 +1017,9 @@ mod tests { fn query_vec() { assert_eq!( QueryVec::try_from("0,1,2".to_string()).unwrap(), - QueryVec(vec![0_u64, 1, 2]) + QueryVec { + values: vec![0_u64, 1, 2] + } ); } } diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 8fdd32e1ba..aac11c4ea8 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -16,6 +16,6 @@ tempfile = "3.1.0" [dependencies] serde_yaml = "0.8.13" types = { path = "../../consensus/types"} 
-eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_config = { path = "../eth2_config"} enr = { version = "0.5.1", features = ["ed25519", "k256"] } diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 8be60242b0..b889b82887 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -3,6 +3,14 @@ # Extends the mainnet preset PRESET_BASE: 'mainnet' +# Transition +# --------------------------------------------------------------- +# TBD, 2**256-2**10 is a placeholder +TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + # Genesis # --------------------------------------------------------------- # `2**14` (= 16,384) @@ -25,8 +33,8 @@ GENESIS_DELAY: 604800 ALTAIR_FORK_VERSION: 0x01000000 ALTAIR_FORK_EPOCH: 74240 # Merge -MERGE_FORK_VERSION: 0x02000000 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02000000 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 @@ -63,6 +71,12 @@ MIN_PER_EPOCH_CHURN_LIMIT: 4 CHURN_LIMIT_QUOTIENT: 65536 +# Fork choice +# --------------------------------------------------------------- +# TODO: enable once proposer boosting is desired on mainnet +# 70% +# PROPOSER_SCORE_BOOST: 70 + # Deposit contract # --------------------------------------------------------------- # Ethereum PoW Mainnet diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index c1c537b788..72a106f36a 100644 --- 
a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -3,6 +3,14 @@ # Extends the mainnet preset PRESET_BASE: 'mainnet' +# Transition +# --------------------------------------------------------------- +# TBD, 2**256-2**10 is a placeholder +TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + # Genesis # --------------------------------------------------------------- # `2**14` (= 16,384) @@ -25,8 +33,8 @@ GENESIS_DELAY: 1919188 ALTAIR_FORK_VERSION: 0x01001020 ALTAIR_FORK_EPOCH: 36660 # Merge -MERGE_FORK_VERSION: 0x02001020 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02001020 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03001020 SHARDING_FORK_EPOCH: 18446744073709551615 @@ -63,6 +71,11 @@ MIN_PER_EPOCH_CHURN_LIMIT: 4 CHURN_LIMIT_QUOTIENT: 65536 +# Fork choice +# --------------------------------------------------------------- +# 70% +PROPOSER_SCORE_BOOST: 70 + # Deposit contract # --------------------------------------------------------------- # Ethereum Goerli testnet diff --git a/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml b/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml index 4a3581c31f..913671c2be 100644 --- a/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/pyrmont/config.yaml @@ -3,6 +3,14 @@ # Extends the mainnet preset PRESET_BASE: 'mainnet' +# Transition +# --------------------------------------------------------------- +# TBD, 2**256-2**10 is a placeholder +TERMINAL_TOTAL_DIFFICULTY: 
115792089237316195423570985008687907853269984665640564039457584007913129638912 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + # Genesis # --------------------------------------------------------------- # `2**14` (= 16,384) @@ -25,8 +33,8 @@ GENESIS_DELAY: 432000 ALTAIR_FORK_VERSION: 0x01002009 ALTAIR_FORK_EPOCH: 61650 # Merge -MERGE_FORK_VERSION: 0x02002009 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02002009 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03002009 SHARDING_FORK_EPOCH: 18446744073709551615 @@ -63,6 +71,11 @@ MIN_PER_EPOCH_CHURN_LIMIT: 4 CHURN_LIMIT_QUOTIENT: 65536 +# Fork choice +# --------------------------------------------------------------- +# 70% +PROPOSER_SCORE_BOOST: 70 + # Deposit contract # --------------------------------------------------------------- # Ethereum Goerli testnet diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 4b7160ae05..98973de1ad 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -307,6 +307,12 @@ pub fn set_float_gauge(gauge: &Result, value: f64) { } } +pub fn set_float_gauge_vec(gauge_vec: &Result, name: &[&str], value: f64) { + if let Some(gauge) = get_gauge(gauge_vec, name) { + gauge.set(value); + } +} + pub fn inc_gauge(gauge: &Result) { if let Ok(gauge) = gauge { gauge.inc(); diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 5892f59f56..6f2baf132c 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -16,7 +16,7 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.0.1-", + prefix = "Lighthouse/v2.1.0-", fallback = 
"unknown" ); diff --git a/common/lockfile/src/lib.rs b/common/lockfile/src/lib.rs index 82e28256f7..adb8be7bb7 100644 --- a/common/lockfile/src/lib.rs +++ b/common/lockfile/src/lib.rs @@ -11,7 +11,7 @@ use std::path::{Path, PathBuf}; /// outage) caused the lockfile not to be deleted. #[derive(Debug)] pub struct Lockfile { - file: File, + _file: File, path: PathBuf, file_existed: bool, } @@ -43,7 +43,7 @@ impl Lockfile { _ => LockfileError::IoError(path.clone(), e), })?; Ok(Self { - file, + _file: file, path, file_existed, }) diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 0deb55a6b6..da1aa8b529 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -12,4 +12,4 @@ slog = "2.5.2" slog-term = "2.6.0" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" -sloggers = "2.0.2" +sloggers = { version = "2.1.1", features = ["json"] } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 06c121210a..eab8e326b6 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -80,10 +80,8 @@ impl<'a> AlignedRecordDecorator<'a> { message_width, } } -} -impl<'a> Write for AlignedRecordDecorator<'a> { - fn write(&mut self, buf: &[u8]) -> Result { + fn filtered_write(&mut self, buf: &[u8]) -> Result { if self.ignore_comma { //don't write comma self.ignore_comma = false; @@ -97,6 +95,21 @@ impl<'a> Write for AlignedRecordDecorator<'a> { self.wrapped.write(buf) } } +} + +impl<'a> Write for AlignedRecordDecorator<'a> { + fn write(&mut self, buf: &[u8]) -> Result { + if buf.iter().any(u8::is_ascii_control) { + let filtered = buf + .iter() + .cloned() + .map(|c| if !is_ascii_control(&c) { c } else { b'_' }) + .collect::>(); + self.filtered_write(&filtered) + } else { + self.filtered_write(buf) + } + } fn flush(&mut self) -> Result<()> { self.wrapped.flush() @@ -159,6 +172,21 @@ impl<'a> slog_term::RecordDecorator for AlignedRecordDecorator<'a> { } } +/// Function to filter out ascii control 
codes. +/// +/// This helps to keep log formatting consistent. +/// Whitespace and padding control codes are excluded. +fn is_ascii_control(character: &u8) -> bool { + matches!( + character, + b'\x00'..=b'\x08' | + b'\x0b'..=b'\x0c' | + b'\x0e'..=b'\x1f' | + b'\x7f' | + b'\x81'..=b'\x9f' + ) +} + /// Return a logger suitable for test usage. /// /// By default no logs will be printed, but they can be enabled via diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 685c524212..813584992e 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -11,3 +11,6 @@ lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" libc = "0.2.79" parking_lot = "0.11.0" + +[features] +mallinfo2 = [] diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index f65c933dd7..402cdc27aa 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -82,27 +82,8 @@ lazy_static! { /// Calls `mallinfo` and updates Prometheus metrics with the results. pub fn scrape_mallinfo_metrics() { - // The docs for this function say it is thread-unsafe since it may return inconsistent results. - // Since these are just metrics it's not a concern to us if they're sometimes inconsistent. - // - // There exists a `mallinfo2` function, however it was released in February 2021 and this seems - // too recent to rely on. - // - // Docs: - // - // https://man7.org/linux/man-pages/man3/mallinfo.3.html let mallinfo = mallinfo(); - /// Cast a C integer as returned by `mallinfo` to an unsigned i64. - /// - /// A cast from `i32` to `i64` preserves the sign bit, resulting in incorrect negative values. - /// Going via `u32` treats the sign bit as part of the number. - /// - /// Results are still wrong for memory usage over 4GiB due to limitations of mallinfo. 
- fn unsigned_i64(x: i32) -> i64 { - x as u32 as i64 - } - set_gauge(&MALLINFO_ARENA, unsigned_i64(mallinfo.arena)); set_gauge(&MALLINFO_ORDBLKS, unsigned_i64(mallinfo.ordblks)); set_gauge(&MALLINFO_SMBLKS, unsigned_i64(mallinfo.smblks)); @@ -114,6 +95,23 @@ pub fn scrape_mallinfo_metrics() { set_gauge(&MALLINFO_KEEPCOST, unsigned_i64(mallinfo.keepcost)); } +/// Cast a C integer as returned by `mallinfo` to an unsigned i64. +/// +/// A cast from `i32` to `i64` preserves the sign bit, resulting in incorrect negative values. +/// Going via `u32` treats the sign bit as part of the number. +/// +/// Results are still wrong for memory usage over 4GiB due to limitations of mallinfo. +#[cfg(not(feature = "mallinfo2"))] +fn unsigned_i64(x: i32) -> i64 { + x as u32 as i64 +} + +/// Cast a C `size_t` as returned by `mallinfo2` to an unsigned i64. +#[cfg(feature = "mallinfo2")] +fn unsigned_i64(x: usize) -> i64 { + x as i64 +} + /// Perform all configuration routines. pub fn configure_glibc_malloc() -> Result<(), String> { if !env_var_present(ENV_VAR_MMAP_THRESHOLD) { @@ -146,12 +144,24 @@ fn mallopt(param: c_int, val: c_int) -> c_int { unsafe { libc::mallopt(param, val) } } +/// By default we use `mallinfo`, but it overflows, so `mallinfo2` should be enabled if available. +/// +/// https://man7.org/linux/man-pages/man3/mallinfo.3.html +#[cfg(not(feature = "mallinfo2"))] fn mallinfo() -> libc::mallinfo { // Prevent this function from being called in parallel with any other non-thread-safe function. let _lock = GLOBAL_LOCK.lock(); unsafe { libc::mallinfo() } } +/// Use `mallinfo2` if enabled. +#[cfg(feature = "mallinfo2")] +fn mallinfo() -> libc::mallinfo2 { + // Prevent this function from being called in parallel with any other non-thread-safe function. 
+ let _lock = GLOBAL_LOCK.lock(); + unsafe { libc::mallinfo2() } +} + fn into_result(result: c_int) -> Result<(), c_int> { if result == 1 { Ok(()) diff --git a/common/monitoring_api/src/lib.rs b/common/monitoring_api/src/lib.rs index 5eb7ea7193..03cdf87c25 100644 --- a/common/monitoring_api/src/lib.rs +++ b/common/monitoring_api/src/lib.rs @@ -128,7 +128,7 @@ impl MonitoringHttpClient { Error::BeaconMetricsFailed("Beacon metrics require db path".to_string()) })?; - let freezer_db_path = self.db_path.as_ref().ok_or_else(|| { + let freezer_db_path = self.freezer_db_path.as_ref().ok_or_else(|| { Error::BeaconMetricsFailed("Beacon metrics require freezer db path".to_string()) })?; let metrics = diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 2d14abb55a..183f5c9313 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -11,6 +11,7 @@ pub use crate::manual_slot_clock::ManualSlotClock; pub use crate::manual_slot_clock::ManualSlotClock as TestingSlotClock; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use metrics::scrape_for_metrics; +use types::consts::merge::INTERVALS_PER_SLOT; pub use types::Slot; /// A clock that reports the current slot. @@ -65,6 +66,9 @@ pub trait SlotClock: Send + Sync + Sized + Clone { /// Returns the first slot to be returned at the genesis time. fn genesis_slot(&self) -> Slot; + /// Returns the `Duration` from `UNIX_EPOCH` to the genesis time. + fn genesis_duration(&self) -> Duration; + /// Returns the slot if the internal clock were advanced by `duration`. fn now_with_future_tolerance(&self, tolerance: Duration) -> Option { self.slot_of(self.now_duration()?.checked_add(tolerance)?) @@ -79,24 +83,47 @@ pub trait SlotClock: Send + Sync + Sized + Clone { /// Returns the delay between the start of the slot and when unaggregated attestations should be /// produced. 
fn unagg_attestation_production_delay(&self) -> Duration { - self.slot_duration() / 3 + self.slot_duration() / INTERVALS_PER_SLOT as u32 } /// Returns the delay between the start of the slot and when sync committee messages should be /// produced. fn sync_committee_message_production_delay(&self) -> Duration { - self.slot_duration() / 3 + self.slot_duration() / INTERVALS_PER_SLOT as u32 } /// Returns the delay between the start of the slot and when aggregated attestations should be /// produced. fn agg_attestation_production_delay(&self) -> Duration { - self.slot_duration() * 2 / 3 + self.slot_duration() * 2 / INTERVALS_PER_SLOT as u32 } /// Returns the delay between the start of the slot and when partially aggregated `SyncCommitteeContribution` should be /// produced. fn sync_committee_contribution_production_delay(&self) -> Duration { - self.slot_duration() * 2 / 3 + self.slot_duration() * 2 / INTERVALS_PER_SLOT as u32 + } + + /// Returns the `Duration` since the start of the current `Slot`. Useful in determining whether to apply proposer boosts. + fn seconds_from_current_slot_start(&self, seconds_per_slot: u64) -> Option { + self.now_duration() + .and_then(|now| now.checked_sub(self.genesis_duration())) + .map(|duration_into_slot| { + Duration::from_secs(duration_into_slot.as_secs() % seconds_per_slot) + }) + } + + /// Produces a *new* slot clock with the same configuration of `self`, except that clock is + /// "frozen" at the `freeze_at` time. + /// + /// This is useful for observing the slot clock at arbitrary fixed points in time. 
+ fn freeze_at(&self, freeze_at: Duration) -> ManualSlotClock { + let slot_clock = ManualSlotClock::new( + self.genesis_slot(), + self.genesis_duration(), + self.slot_duration(), + ); + slot_clock.set_current_time(freeze_at); + slot_clock } } diff --git a/common/slot_clock/src/manual_slot_clock.rs b/common/slot_clock/src/manual_slot_clock.rs index 567a6b4cd9..296247fe93 100644 --- a/common/slot_clock/src/manual_slot_clock.rs +++ b/common/slot_clock/src/manual_slot_clock.rs @@ -154,6 +154,10 @@ impl SlotClock for ManualSlotClock { fn genesis_slot(&self) -> Slot { self.genesis_slot } + + fn genesis_duration(&self) -> Duration { + self.genesis_duration + } } #[cfg(test)] diff --git a/common/slot_clock/src/system_time_slot_clock.rs b/common/slot_clock/src/system_time_slot_clock.rs index c5d6dedc9b..c54646fbc6 100644 --- a/common/slot_clock/src/system_time_slot_clock.rs +++ b/common/slot_clock/src/system_time_slot_clock.rs @@ -61,6 +61,10 @@ impl SlotClock for SystemTimeSlotClock { fn genesis_slot(&self) -> Slot { self.clock.genesis_slot() } + + fn genesis_duration(&self) -> Duration { + *self.clock.genesis_duration() + } } #[cfg(test)] diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 0e15e16e02..6874966abd 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -125,7 +125,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime. /// - /// The future is wrapped in an `exit_future::Exit`. The task is canceled when the corresponding + /// The future is wrapped in an `exit_future::Exit`. The task is cancelled when the corresponding /// exit_future `Signal` is fired/dropped. /// /// The future is monitored via another spawned future to ensure that it doesn't panic. 
In case diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 19755c31ab..784d4d1df0 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -16,7 +16,7 @@ filesystem = { path = "../filesystem" } types = { path = "../../consensus/types" } rand = "0.7.3" deposit_contract = { path = "../deposit_contract" } -tree_hash = "0.4.0" +tree_hash = "0.4.1" hex = "0.4.2" derivative = "2.1.1" lockfile = { path = "../lockfile" } diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index bfa3e2553d..2fabebc743 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -63,7 +63,7 @@ pub struct Eth1DepositData { pub struct ValidatorDir { dir: PathBuf, #[derivative(PartialEq = "ignore")] - lockfile: Lockfile, + _lockfile: Lockfile, } impl ValidatorDir { @@ -85,7 +85,10 @@ impl ValidatorDir { let lockfile_path = dir.join(format!("{}.lock", VOTING_KEYSTORE_FILE)); let lockfile = Lockfile::new(lockfile_path).map_err(Error::LockfileError)?; - Ok(Self { dir, lockfile }) + Ok(Self { + dir, + _lockfile: lockfile, + }) } /// Returns the `dir` provided to `Self::open`. 
diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index f99d7773b9..09b6f125fc 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -18,3 +18,4 @@ tokio = { version = "1.14.0", features = ["sync"] } headers = "0.3.2" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" +serde_array_query = "0.1.0" diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs index 5f37dde87d..346361b18f 100644 --- a/common/warp_utils/src/lib.rs +++ b/common/warp_utils/src/lib.rs @@ -3,5 +3,6 @@ pub mod cors; pub mod metrics; +pub mod query; pub mod reject; pub mod task; diff --git a/common/warp_utils/src/query.rs b/common/warp_utils/src/query.rs new file mode 100644 index 0000000000..c5ed5c5f12 --- /dev/null +++ b/common/warp_utils/src/query.rs @@ -0,0 +1,22 @@ +use crate::reject::custom_bad_request; +use serde::Deserialize; +use warp::Filter; + +// Custom query filter using `serde_array_query`. +// This allows duplicate keys inside query strings. +pub fn multi_key_query<'de, T: Deserialize<'de>>( +) -> impl warp::Filter,), Error = std::convert::Infallible> + Copy +{ + raw_query().then(|query_str: String| async move { + serde_array_query::from_str(&query_str).map_err(|e| custom_bad_request(e.to_string())) + }) +} + +// This ensures that empty query strings are still accepted. +// This is because warp::filters::query::raw() does not allow empty query strings +// but warp::query::() does. 
+fn raw_query() -> impl Filter + Copy { + warp::filters::query::raw() + .or(warp::any().map(String::default)) + .unify() +} diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index 2816bba0e6..b77c800b10 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -6,11 +6,11 @@ edition = "2018" [dependencies] ethereum-types = "0.12.1" -eth2_ssz_types = "0.2.1" +eth2_ssz_types = "0.2.2" eth2_hashing = "0.2.0" eth2_ssz_derive = "0.3.0" -eth2_ssz = "0.4.0" -tree_hash = "0.4.0" +eth2_ssz = "0.4.1" +tree_hash = "0.4.1" smallvec = "1.6.1" [dev-dependencies] diff --git a/consensus/cached_tree_hash/src/cache_arena.rs b/consensus/cached_tree_hash/src/cache_arena.rs index 9e11134aab..a938d48266 100644 --- a/consensus/cached_tree_hash/src/cache_arena.rs +++ b/consensus/cached_tree_hash/src/cache_arena.rs @@ -491,8 +491,8 @@ mod tests { subs.push(sub); } - for mut sub in subs.iter_mut() { - test_routine(arena, &mut sub); + for sub in subs.iter_mut() { + test_routine(arena, sub); } } } diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index f708045df1..a17b31db64 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" [dependencies] types = { path = "../types" } proto_array = { path = "../proto_array" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index af44176eb7..801b0f39df 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,14 +1,14 @@ -use std::marker::PhantomData; - -use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice}; -use ssz_derive::{Decode, Encode}; -use types::{ - AttestationShufflingId, BeaconBlock, BeaconState, BeaconStateError, ChainSpec, Checkpoint, - Epoch, EthSpec, Hash256, IndexedAttestation, 
RelativeEpoch, SignedBeaconBlock, Slot, -}; - use crate::ForkChoiceStore; +use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice}; +use ssz_derive::{Decode, Encode}; use std::cmp::Ordering; +use std::marker::PhantomData; +use std::time::Duration; +use types::{ + consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlock, BeaconState, + BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, + RelativeEpoch, SignedBeaconBlock, Slot, +}; #[derive(Debug)] pub enum Error { @@ -16,6 +16,7 @@ pub enum Error { InvalidBlock(InvalidBlock), ProtoArrayError(String), InvalidProtoArrayBytes(String), + InvalidLegacyProtoArrayBytes(String), MissingProtoArrayBlock(Hash256), UnknownAncestor { ancestor_slot: Slot, @@ -37,6 +38,11 @@ pub enum Error { block_slot: Slot, state_slot: Slot, }, + InvalidPayloadStatus { + block_slot: Slot, + block_root: Hash256, + payload_verification_status: PayloadVerificationStatus, + }, } impl From for Error { @@ -100,6 +106,19 @@ impl From for Error { } } +/// Indicates if a block has been verified by an execution payload. +/// +/// There is no variant for "invalid", since such a block should never be added to fork choice. +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum PayloadVerificationStatus { + /// An EL has declared the execution payload to be valid. + Verified, + /// An EL has not yet made a determination about the execution payload. + NotVerified, + /// The block is either pre-merge-fork, or prior to the terminal PoW block. + Irrelevant, +} + /// Calculate how far `slot` lies from the start of its epoch. /// /// ## Specification @@ -149,6 +168,13 @@ where store.set_current_slot(time); let current_slot = store.get_current_slot(); + + // Reset proposer boost if this is a new slot. + if current_slot > previous_slot { + store.set_proposer_boost_root(Hash256::zero()); + } + + // Not a new epoch, return. 
if !(current_slot > previous_slot && compute_slots_since_epoch_start::(current_slot) == 0) { return Ok(()); } @@ -199,6 +225,15 @@ fn dequeue_attestations( std::mem::replace(queued_attestations, remaining) } +/// Denotes whether an attestation we are processing was received from a block or from gossip. +/// Equivalent to the `is_from_block` `bool` in: +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation +pub enum AttestationFromBlock { + True, + False, +} + /// Provides an implementation of "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#ethereum-20-phase-0----beacon-chain-fork-choice @@ -260,14 +295,24 @@ where AttestationShufflingId::new(anchor_block_root, anchor_state, RelativeEpoch::Next) .map_err(Error::BeaconStateError)?; + // Default any non-merge execution block hashes to 0x000..000. + let execution_status = anchor_block.message_merge().map_or_else( + |()| ExecutionStatus::irrelevant(), + |message| { + // Assume that this payload is valid, since the anchor should be a trusted block and + // state. 
+ ExecutionStatus::Valid(message.body.execution_payload.block_hash) + }, + ); + let proto_array = ProtoArrayForkChoice::new( finalized_block_slot, finalized_block_state_root, - fc_store.justified_checkpoint().epoch, - fc_store.finalized_checkpoint().epoch, - fc_store.finalized_checkpoint().root, + *fc_store.justified_checkpoint(), + *fc_store.finalized_checkpoint(), current_epoch_shuffling_id, next_epoch_shuffling_id, + execution_status, )?; Ok(Self { @@ -347,7 +392,11 @@ where /// Is equivalent to: /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_head - pub fn get_head(&mut self, current_slot: Slot) -> Result> { + pub fn get_head( + &mut self, + current_slot: Slot, + spec: &ChainSpec, + ) -> Result> { self.update_time(current_slot)?; let store = &mut self.fc_store; @@ -356,11 +405,12 @@ where let justified_balances = store.justified_balances().to_vec(); self.proto_array - .find_head( - store.justified_checkpoint().epoch, - store.justified_checkpoint().root, - store.finalized_checkpoint().epoch, - &justified_balances, + .find_head::( + *store.justified_checkpoint(), + *store.finalized_checkpoint(), + store.justified_balances(), + store.proposer_boost_root(), + spec, ) .map_err(Into::into) } @@ -435,12 +485,15 @@ where /// /// The supplied block **must** pass the `state_transition` function as it will not be run /// here. + #[allow(clippy::too_many_arguments)] pub fn on_block( &mut self, current_slot: Slot, block: &BeaconBlock, block_root: Hash256, + block_delay: Duration, state: &BeaconState, + payload_verification_status: PayloadVerificationStatus, spec: &ChainSpec, ) -> Result<(), Error> { let current_slot = self.update_time(current_slot)?; @@ -492,6 +545,13 @@ where })); } + // Add proposer score boost if the block is timely. 
+ let is_before_attesting_interval = + block_delay < Duration::from_secs(spec.seconds_per_slot / INTERVALS_PER_SLOT); + if current_slot == block.slot() && is_before_attesting_interval { + self.fc_store.set_proposer_boost_root(block_root); + } + // Update justified checkpoint. if state.current_justified_checkpoint().epoch > self.fc_store.justified_checkpoint().epoch { if state.current_justified_checkpoint().epoch @@ -511,25 +571,9 @@ where if state.finalized_checkpoint().epoch > self.fc_store.finalized_checkpoint().epoch { self.fc_store .set_finalized_checkpoint(state.finalized_checkpoint()); - let finalized_slot = - compute_start_slot_at_epoch::(self.fc_store.finalized_checkpoint().epoch); - - // Note: the `if` statement here is not part of the specification, but I claim that it - // is an optimization and equivalent to the specification. See this PR for more - // information: - // - // https://github.com/ethereum/eth2.0-specs/pull/1880 - if *self.fc_store.justified_checkpoint() != state.current_justified_checkpoint() - && (state.current_justified_checkpoint().epoch - > self.fc_store.justified_checkpoint().epoch - || self - .get_ancestor(self.fc_store.justified_checkpoint().root, finalized_slot)? - != Some(self.fc_store.finalized_checkpoint().root)) - { - self.fc_store - .set_justified_checkpoint(state.current_justified_checkpoint()) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; - } + self.fc_store + .set_justified_checkpoint(state.current_justified_checkpoint()) + .map_err(Error::UnableToSetJustifiedCheckpoint)?; } let target_slot = block @@ -548,6 +592,33 @@ where .on_verified_block(block, block_root, state) .map_err(Error::AfterBlockFailed)?; + let execution_status = if let Ok(execution_payload) = block.body().execution_payload() { + let block_hash = execution_payload.block_hash; + + if block_hash == Hash256::zero() { + // The block is post-merge-fork, but pre-terminal-PoW block. We don't need to verify + // the payload. 
+ ExecutionStatus::irrelevant() + } else { + match payload_verification_status { + PayloadVerificationStatus::Verified => ExecutionStatus::Valid(block_hash), + PayloadVerificationStatus::NotVerified => ExecutionStatus::Unknown(block_hash), + // It would be a logic error to declare a block irrelevant if it has an + // execution payload with a non-zero block hash. + PayloadVerificationStatus::Irrelevant => { + return Err(Error::InvalidPayloadStatus { + block_slot: block.slot(), + block_root, + payload_verification_status, + }) + } + } + } + } else { + // There is no payload to verify. + ExecutionStatus::irrelevant() + }; + // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it doesn't have any votes. self.proto_array.process_block(ProtoBlock { @@ -568,13 +639,43 @@ where ) .map_err(Error::BeaconStateError)?, state_root: block.state_root(), - justified_epoch: state.current_justified_checkpoint().epoch, - finalized_epoch: state.finalized_checkpoint().epoch, + justified_checkpoint: state.current_justified_checkpoint(), + finalized_checkpoint: state.finalized_checkpoint(), + execution_status, })?; Ok(()) } + /// Validates the `epoch` against the current time according to the fork choice store. + /// + /// ## Specification + /// + /// Equivalent to: + /// + /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_target_epoch_against_current_time + fn validate_target_epoch_against_current_time( + &self, + target_epoch: Epoch, + ) -> Result<(), InvalidAttestation> { + let slot_now = self.fc_store.get_current_slot(); + let epoch_now = slot_now.epoch(E::slots_per_epoch()); + + // Attestation must be from the current or previous epoch. 
+ if target_epoch > epoch_now { + return Err(InvalidAttestation::FutureEpoch { + attestation_epoch: target_epoch, + current_epoch: epoch_now, + }); + } else if target_epoch + 1 < epoch_now { + return Err(InvalidAttestation::PastEpoch { + attestation_epoch: target_epoch, + current_epoch: epoch_now, + }); + } + Ok(()) + } + /// Validates the `indexed_attestation` for application to fork choice. /// /// ## Specification @@ -585,6 +686,7 @@ where fn validate_on_attestation( &self, indexed_attestation: &IndexedAttestation, + is_from_block: AttestationFromBlock, ) -> Result<(), InvalidAttestation> { // There is no point in processing an attestation with an empty bitfield. Reject // it immediately. @@ -595,21 +697,10 @@ where return Err(InvalidAttestation::EmptyAggregationBitfield); } - let slot_now = self.fc_store.get_current_slot(); - let epoch_now = slot_now.epoch(E::slots_per_epoch()); let target = indexed_attestation.data.target; - // Attestation must be from the current or previous epoch. - if target.epoch > epoch_now { - return Err(InvalidAttestation::FutureEpoch { - attestation_epoch: target.epoch, - current_epoch: epoch_now, - }); - } else if target.epoch + 1 < epoch_now { - return Err(InvalidAttestation::PastEpoch { - attestation_epoch: target.epoch, - current_epoch: epoch_now, - }); + if matches!(is_from_block, AttestationFromBlock::False) { + self.validate_target_epoch_against_current_time(target.epoch)?; } if target.epoch != indexed_attestation.data.slot.epoch(E::slots_per_epoch()) { @@ -692,6 +783,7 @@ where &mut self, current_slot: Slot, attestation: &IndexedAttestation, + is_from_block: AttestationFromBlock, ) -> Result<(), Error> { // Ensure the store is up-to-date. 
self.update_time(current_slot)?; @@ -713,7 +805,7 @@ where return Ok(()); } - self.validate_on_attestation(attestation)?; + self.validate_on_attestation(attestation, is_from_block)?; if attestation.data.slot < self.fc_store.get_current_slot() { for validator_index in attestation.attesting_indices.iter() { @@ -839,6 +931,11 @@ where &self.queued_attestations } + /// Returns the store's `proposer_boost_root`. + pub fn proposer_boost_root(&self) -> Hash256 { + self.fc_store.proposer_boost_root() + } + /// Prunes the underlying fork choice DAG. pub fn prune(&mut self) -> Result<(), Error> { let finalized_root = self.fc_store.finalized_checkpoint().root; @@ -880,7 +977,7 @@ where /// This is used when persisting the state of the fork choice to disk. #[derive(Encode, Decode, Clone)] pub struct PersistedForkChoice { - proto_array_bytes: Vec, + pub proto_array_bytes: Vec, queued_attestations: Vec, } diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index c74610cc0e..9b85708f34 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -19,7 +19,7 @@ use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; pub trait ForkChoiceStore: Sized { type Error; - /// Returns the last value passed to `Self::update_time`. + /// Returns the last value passed to `Self::set_current_slot`. fn get_current_slot(&self) -> Slot; /// Set the value to be returned by `Self::get_current_slot`. @@ -50,6 +50,9 @@ pub trait ForkChoiceStore: Sized { /// Returns the `finalized_checkpoint`. fn finalized_checkpoint(&self) -> &Checkpoint; + /// Returns the `proposer_boost_root`. + fn proposer_boost_root(&self) -> Hash256; + /// Sets `finalized_checkpoint`. fn set_finalized_checkpoint(&mut self, checkpoint: Checkpoint); @@ -58,4 +61,7 @@ pub trait ForkChoiceStore: Sized { /// Sets the `best_justified_checkpoint`. 
fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint); + + /// Sets the proposer boost root. + fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256); } diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 5e9deac3b5..ba031cdf7f 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -2,7 +2,8 @@ mod fork_choice; mod fork_choice_store; pub use crate::fork_choice::{ - Error, ForkChoice, InvalidAttestation, InvalidBlock, PersistedForkChoice, QueuedAttestation, + AttestationFromBlock, Error, ForkChoice, InvalidAttestation, InvalidBlock, + PayloadVerificationStatus, PersistedForkChoice, QueuedAttestation, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::Block as ProtoBlock; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 2c0d498e19..42b56f6abf 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -2,6 +2,7 @@ use std::fmt; use std::sync::Mutex; +use std::time::Duration; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, @@ -10,7 +11,9 @@ use beacon_chain::{ BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError, StateSkipConfig, WhenSlotSkipped, }; -use fork_choice::{ForkChoiceStore, InvalidAttestation, InvalidBlock, QueuedAttestation}; +use fork_choice::{ + ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, +}; use store::MemoryStore; use types::{ test_utils::generate_deterministic_keypair, BeaconBlock, BeaconBlockRef, BeaconState, @@ -272,7 +275,9 @@ impl ForkChoiceTest { current_slot, &block, block.canonical_root(), + Duration::from_secs(0), &state, + PayloadVerificationStatus::Verified, &self.harness.chain.spec, ) .unwrap(); @@ -313,7 +318,9 @@ impl ForkChoiceTest { current_slot, &block, block.canonical_root(), + Duration::from_secs(0), 
&state, + PayloadVerificationStatus::Verified, &self.harness.chain.spec, ) .err() diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 6be269fcff..2794d3c8e1 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -10,7 +10,7 @@ path = "src/bin.rs" [dependencies] types = { path = "../types" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" serde = "1.0.116" serde_derive = "1.0.116" diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 11265aa362..adb10c035d 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -1,4 +1,4 @@ -use types::{Epoch, Hash256}; +use types::{Checkpoint, Epoch, Hash256}; #[derive(Clone, PartialEq, Debug)] pub enum Error { @@ -13,6 +13,7 @@ pub enum Error { InvalidParentDelta(usize), InvalidNodeDelta(usize), DeltaOverflow(usize), + ProposerBoostOverflow(usize), IndexOverflow(&'static str), InvalidDeltaLen { deltas: usize, @@ -22,12 +23,19 @@ pub enum Error { current_finalized_epoch: Epoch, new_finalized_epoch: Epoch, }, - InvalidBestNode { - start_root: Hash256, - justified_epoch: Epoch, - finalized_epoch: Epoch, - head_root: Hash256, - head_justified_epoch: Epoch, - head_finalized_epoch: Epoch, + InvalidBestNode(Box), + InvalidAncestorOfValidPayload { + ancestor_block_root: Hash256, + ancestor_payload_block_hash: Hash256, }, } + +#[derive(Clone, PartialEq, Debug)] +pub struct InvalidBestNodeInfo { + pub start_root: Hash256, + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, + pub head_root: Hash256, + pub head_justified_checkpoint: Option, + pub head_finalized_checkpoint: Option, +} diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 688878e1ae..e28fc67718 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ 
b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -2,9 +2,9 @@ mod ffg_updates; mod no_votes; mod votes; -use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice}; +use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use serde_derive::{Deserialize, Serialize}; -use types::{AttestationShufflingId, Epoch, Hash256, Slot}; +use types::{AttestationShufflingId, Checkpoint, Epoch, EthSpec, Hash256, MainnetEthSpec, Slot}; pub use ffg_updates::*; pub use no_votes::*; @@ -13,24 +13,22 @@ pub use votes::*; #[derive(Debug, Clone, Serialize, Deserialize)] pub enum Operation { FindHead { - justified_epoch: Epoch, - justified_root: Hash256, - finalized_epoch: Epoch, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, justified_state_balances: Vec, expected_head: Hash256, }, InvalidFindHead { - justified_epoch: Epoch, - justified_root: Hash256, - finalized_epoch: Epoch, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, justified_state_balances: Vec, }, ProcessBlock { slot: Slot, root: Hash256, parent_root: Hash256, - justified_epoch: Epoch, - finalized_epoch: Epoch, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, }, ProcessAttestation { validator_index: usize, @@ -47,9 +45,8 @@ pub enum Operation { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ForkChoiceTestDefinition { pub finalized_block_slot: Slot, - pub justified_epoch: Epoch, - pub finalized_epoch: Epoch, - pub finalized_root: Hash256, + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, pub operations: Vec, } @@ -57,35 +54,37 @@ impl ForkChoiceTestDefinition { pub fn run(self) { let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); + let execution_status = ExecutionStatus::irrelevant(); let mut fork_choice = ProtoArrayForkChoice::new( self.finalized_block_slot, Hash256::zero(), - self.justified_epoch, - self.finalized_epoch, - 
self.finalized_root, + self.justified_checkpoint, + self.finalized_checkpoint, junk_shuffling_id.clone(), junk_shuffling_id, + execution_status, ) .expect("should create fork choice struct"); for (op_index, op) in self.operations.into_iter().enumerate() { match op.clone() { Operation::FindHead { - justified_epoch, - justified_root, - finalized_epoch, + justified_checkpoint, + finalized_checkpoint, justified_state_balances, expected_head, } => { let head = fork_choice - .find_head( - justified_epoch, - justified_root, - finalized_epoch, + .find_head::( + justified_checkpoint, + finalized_checkpoint, &justified_state_balances, + Hash256::zero(), + &MainnetEthSpec::default_spec(), ) - .unwrap_or_else(|_| { - panic!("find_head op at index {} returned error", op_index) + .map_err(|e| e) + .unwrap_or_else(|e| { + panic!("find_head op at index {} returned error {}", op_index, e) }); assert_eq!( @@ -96,16 +95,16 @@ impl ForkChoiceTestDefinition { check_bytes_round_trip(&fork_choice); } Operation::InvalidFindHead { - justified_epoch, - justified_root, - finalized_epoch, + justified_checkpoint, + finalized_checkpoint, justified_state_balances, } => { - let result = fork_choice.find_head( - justified_epoch, - justified_root, - finalized_epoch, + let result = fork_choice.find_head::( + justified_checkpoint, + finalized_checkpoint, &justified_state_balances, + Hash256::zero(), + &MainnetEthSpec::default_spec(), ); assert!( @@ -120,8 +119,8 @@ impl ForkChoiceTestDefinition { slot, root, parent_root, - justified_epoch, - finalized_epoch, + justified_checkpoint, + finalized_checkpoint, } => { let block = Block { slot, @@ -137,8 +136,9 @@ impl ForkChoiceTestDefinition { Epoch::new(0), Hash256::zero(), ), - justified_epoch, - finalized_epoch, + justified_checkpoint, + finalized_checkpoint, + execution_status, }; fork_choice.process_block(block).unwrap_or_else(|e| { panic!( @@ -190,7 +190,16 @@ impl ForkChoiceTestDefinition { /// Gives a hash that is not the zero hash (unless i is 
`usize::max_value)`. fn get_hash(i: u64) -> Hash256 { - Hash256::from_low_u64_be(i) + Hash256::from_low_u64_be(i + 1) +} + +/// Gives a checkpoint with a root that is not the zero hash (unless i is `usize::max_value)`. +/// `Epoch` will always equal `i`. +fn get_checkpoint(i: u64) -> Checkpoint { + Checkpoint { + epoch: Epoch::new(i), + root: get_hash(i), + } } fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { diff --git a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs index 4b7eb25d78..a129064504 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/ffg_updates.rs @@ -6,9 +6,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { // Ensure that the head starts at the finalized block. ops.push(Operation::FindHead { - justified_epoch: Epoch::new(0), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(0), }); @@ -26,22 +25,22 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { slot: Slot::new(1), root: get_hash(1), parent_root: get_hash(0), - justified_epoch: Epoch::new(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), root: get_hash(2), parent_root: get_hash(1), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(1), + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), root: get_hash(3), parent_root: get_hash(2), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(1), + justified_checkpoint: get_checkpoint(2), + 
finalized_checkpoint: get_checkpoint(1), }); // Ensure that with justified epoch 0 we find 3 @@ -54,9 +53,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { // | // 3 <- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(0), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(3), }); @@ -71,9 +69,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { // | // 3 <- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(2), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(1), + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(2), }); @@ -88,9 +85,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { // | // 3 <- start + head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(3), - finalized_epoch: Epoch::new(1), + justified_checkpoint: get_checkpoint(2), + finalized_checkpoint: get_checkpoint(1), justified_state_balances: balances, expected_head: get_hash(3), }); @@ -98,9 +94,8 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { // END OF TESTS ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), - finalized_root: get_hash(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), operations: ops, } } @@ -111,9 +106,8 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // Ensure that the head starts at the finalized block. 
ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(0), }); @@ -137,36 +131,48 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { slot: Slot::new(1), root: get_hash(1), parent_root: get_hash(0), - justified_epoch: Epoch::new(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), root: get_hash(3), parent_root: get_hash(1), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(1), + }, + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), root: get_hash(5), parent_root: get_hash(3), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(1), + }, + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), root: get_hash(7), parent_root: get_hash(5), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(1), + }, + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { - slot: Slot::new(4), + slot: Slot::new(5), root: get_hash(9), parent_root: get_hash(7), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(3), + }, + finalized_checkpoint: get_checkpoint(0), }); // Right branch @@ -174,36 +180,42 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { slot: Slot::new(1), root: get_hash(2), 
parent_root: get_hash(0), - justified_epoch: Epoch::new(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(2), root: get_hash(4), parent_root: get_hash(2), - justified_epoch: Epoch::new(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(3), root: get_hash(6), parent_root: get_hash(4), - justified_epoch: Epoch::new(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { slot: Slot::new(4), root: get_hash(8), parent_root: get_hash(6), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(2), + }, + finalized_checkpoint: get_checkpoint(0), }); ops.push(Operation::ProcessBlock { - slot: Slot::new(4), + slot: Slot::new(5), root: get_hash(10), parent_root: get_hash(8), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(4), + }, + finalized_checkpoint: get_checkpoint(0), }); // Ensure that if we start at 0 we find 10 (just: 0, fin: 0). @@ -220,25 +232,28 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // | | // 9 10 <-- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(0), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(10), }); // Same as above, but with justified epoch 2. 
ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(4), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(10), }); // Same as above, but with justified epoch 3 (should be invalid). ops.push(Operation::InvalidFindHead { - justified_epoch: Epoch::new(3), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(3), + root: get_hash(6), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), }); @@ -275,25 +290,28 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // | | // head -> 9 10 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(0), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(9), }); // Save as above but justified epoch 2. ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(3), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(9), }); // Save as above but justified epoch 3 (should fail). 
ops.push(Operation::InvalidFindHead { - justified_epoch: Epoch::new(3), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(3), + root: get_hash(5), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), }); @@ -330,25 +348,28 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // | | // 9 10 <-- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(0), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(10), }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(4), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(10), }); // Same as above but justified epoch 3 (should fail). ops.push(Operation::InvalidFindHead { - justified_epoch: Epoch::new(3), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(3), + root: get_hash(6), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), }); @@ -366,25 +387,31 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // | | // head -> 9 10 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(0), - justified_root: get_hash(1), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(0), + root: get_hash(1), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(9), }); // Same as above but justified epoch 2. 
ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(1), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(3), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(9), }); // Same as above but justified epoch 3 (should fail). ops.push(Operation::InvalidFindHead { - justified_epoch: Epoch::new(3), - justified_root: get_hash(1), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(3), + root: get_hash(5), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), }); @@ -402,34 +429,36 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { // | | // 9 10 <- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(0), - justified_root: get_hash(2), - finalized_epoch: Epoch::new(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(10), }); // Same as above but justified epoch 2. ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(2), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(4), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances.clone(), expected_head: get_hash(10), }); // Same as above but justified epoch 3 (should fail). 
ops.push(Operation::InvalidFindHead { - justified_epoch: Epoch::new(3), - justified_root: get_hash(2), - finalized_epoch: Epoch::new(0), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(3), + root: get_hash(6), + }, + finalized_checkpoint: get_checkpoint(0), justified_state_balances: balances, }); // END OF TESTS ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), - finalized_root: get_hash(0), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), operations: ops, } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index e42abe2885..0fbcafc5d4 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -6,9 +6,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { let operations = vec![ // Check that the head is the finalized block. 
Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: Hash256::zero(), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, justified_state_balances: balances.clone(), expected_head: Hash256::zero(), }, @@ -18,11 +23,17 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // / // 2 Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(1), root: get_hash(2), - parent_root: get_hash(0), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), + parent_root: Hash256::zero(), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, }, // Ensure the head is 2 // @@ -30,9 +41,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // / // 2 <- head Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: Hash256::zero(), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, justified_state_balances: balances.clone(), expected_head: get_hash(2), }, @@ -42,11 +58,17 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // 2 1 Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(1), root: get_hash(1), parent_root: get_hash(0), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, }, // Ensure the head is still 2 // @@ -54,9 +76,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // 
head-> 2 1 Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: Hash256::zero(), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, justified_state_balances: balances.clone(), expected_head: get_hash(2), }, @@ -68,11 +95,17 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | // 3 Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(2), root: get_hash(3), parent_root: get_hash(1), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, }, // Ensure 2 is still the head // @@ -82,9 +115,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | // 3 Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: Hash256::zero(), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, justified_state_balances: balances.clone(), expected_head: get_hash(2), }, @@ -96,11 +134,17 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | | // 4 3 Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(2), root: get_hash(4), parent_root: get_hash(2), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, }, // Ensure the head is 4. 
// @@ -110,9 +154,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | | // head-> 4 3 Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: Hash256::zero(), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, justified_state_balances: balances.clone(), expected_head: get_hash(4), }, @@ -126,11 +175,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | // 5 <- justified epoch = 2 Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(3), root: get_hash(5), parent_root: get_hash(4), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(1), + justified_checkpoint: get_checkpoint(2), + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, }, // Ensure the head is still 4 whilst the justified epoch is 0. // @@ -142,9 +194,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | // 5 Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: Hash256::zero(), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, justified_state_balances: balances.clone(), expected_head: get_hash(4), }, @@ -158,9 +215,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | // 5 <- starting from 5 with justified epoch 0 should error. 
Operation::InvalidFindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, justified_state_balances: balances.clone(), }, // Set the justified epoch to 2 and the start block to 5 and ensure 5 is the head. @@ -173,9 +235,11 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | // 5 <- head Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(1), + justified_checkpoint: get_checkpoint(2), + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, justified_state_balances: balances.clone(), expected_head: get_hash(5), }, @@ -191,11 +255,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | // 6 Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(4), root: get_hash(6), parent_root: get_hash(5), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(1), + justified_checkpoint: get_checkpoint(2), + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, }, // Ensure 6 is the head // @@ -209,9 +276,11 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { // | // 6 <- head Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(1), + justified_checkpoint: get_checkpoint(2), + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, justified_state_balances: balances, expected_head: get_hash(6), }, @@ -219,9 +288,14 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), - finalized_root: get_hash(0), 
+ justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: Hash256::zero(), + }, operations, } } diff --git a/consensus/proto_array/src/fork_choice_test_definition/votes.rs b/consensus/proto_array/src/fork_choice_test_definition/votes.rs index ac9513c5f2..f65177a849 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/votes.rs @@ -6,9 +6,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // Ensure that the head starts at the finalized block. ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(0), }); @@ -19,11 +24,17 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / // 2 ops.push(Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(1), root: get_hash(2), parent_root: get_hash(0), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, }); // Ensure that the head is 2 @@ -32,9 +43,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / // head-> 2 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: 
get_hash(2), }); @@ -46,11 +62,17 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // 2 1 ops.push(Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(1), root: get_hash(1), parent_root: get_hash(0), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, }); // Ensure that the head is still 2 @@ -59,9 +81,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // head-> 2 1 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(2), }); @@ -77,15 +104,20 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { target_epoch: Epoch::new(2), }); - // Ensure that the head is now 1, beacuse 1 has a vote. + // Ensure that the head is now 1, because 1 has a vote. 
// // 0 // / \ // 2 1 <- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(1), }); @@ -107,9 +139,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // head-> 2 1 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(2), }); @@ -122,11 +159,17 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 3 ops.push(Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(2), root: get_hash(3), parent_root: get_hash(1), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, }); // Ensure that the head is still 2 @@ -137,9 +180,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 3 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(2), }); @@ -165,9 +213,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 3 ops.push(Operation::FindHead 
{ - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(2), }); @@ -194,9 +247,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 3 <- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(3), }); @@ -211,11 +269,17 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 4 ops.push(Operation::ProcessBlock { - slot: Slot::new(0), + slot: Slot::new(3), root: get_hash(4), parent_root: get_hash(3), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, }); // Ensure that the head is now 4 @@ -228,9 +292,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // 4 <- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(4), }); @@ -247,11 +316,17 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / // 5 <- justified epoch = 2 ops.push(Operation::ProcessBlock { - slot: Slot::new(0), + 
slot: Slot::new(4), root: get_hash(5), parent_root: get_hash(4), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(1), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(1), + }, }); // Ensure that 5 is filtered out and the head stays at 4. @@ -266,9 +341,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / // 5 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(4), }); @@ -288,8 +368,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { slot: Slot::new(0), root: get_hash(6), parent_root: get_hash(4), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, }); // Move both votes to 5. 
@@ -336,22 +422,40 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { slot: Slot::new(0), root: get_hash(7), parent_root: get_hash(5), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), root: get_hash(8), parent_root: get_hash(7), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, }); ops.push(Operation::ProcessBlock { slot: Slot::new(0), root: get_hash(9), parent_root: get_hash(8), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, }); // Ensure that 6 is the head, even though 5 has all the votes. 
This is testing to ensure @@ -373,9 +477,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / // 9 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(1), - justified_root: get_hash(0), - finalized_epoch: Epoch::new(1), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, justified_state_balances: balances.clone(), expected_head: get_hash(6), }); @@ -401,9 +510,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / // head-> 9 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, justified_state_balances: balances.clone(), expected_head: get_hash(9), }); @@ -460,15 +574,26 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { slot: Slot::new(0), root: get_hash(10), parent_root: get_hash(8), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, }); // Double-check the head is still 9 (no diagram this time) ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, justified_state_balances: balances.clone(), expected_head: get_hash(9), }); @@ -522,9 +647,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // 9 10 <- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - 
justified_root: get_hash(5), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, justified_state_balances: balances.clone(), expected_head: get_hash(10), }); @@ -542,9 +672,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // head-> 9 10 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, justified_state_balances: balances.clone(), expected_head: get_hash(9), }); @@ -562,9 +697,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // 9 10 <- head ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, justified_state_balances: balances.clone(), expected_head: get_hash(10), }); @@ -583,9 +723,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // / \ // head-> 9 10 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, justified_state_balances: balances.clone(), expected_head: get_hash(9), }); @@ -599,9 +744,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // Run find-head, ensure the no-op prune didn't change the head. 
ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, justified_state_balances: balances.clone(), expected_head: get_hash(9), }); @@ -632,9 +782,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // Run find-head, ensure the prune didn't change the head. ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, justified_state_balances: balances.clone(), expected_head: get_hash(9), }); @@ -654,8 +809,14 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { slot: Slot::new(0), root: get_hash(11), parent_root: get_hash(9), - justified_epoch: Epoch::new(2), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, }); // Ensure the head is now 11 @@ -670,18 +831,28 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { // | // head-> 11 ops.push(Operation::FindHead { - justified_epoch: Epoch::new(2), - justified_root: get_hash(5), - finalized_epoch: Epoch::new(2), + justified_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(2), + root: get_hash(5), + }, justified_state_balances: balances, expected_head: get_hash(11), }); ForkChoiceTestDefinition { finalized_block_slot: Slot::new(0), - justified_epoch: Epoch::new(1), - finalized_epoch: Epoch::new(1), - finalized_root: get_hash(0), + justified_checkpoint: 
Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, + finalized_checkpoint: Checkpoint { + epoch: Epoch::new(1), + root: get_hash(0), + }, operations: ops, } } diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index d1c0ee63fe..216d189fb2 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -4,9 +4,11 @@ mod proto_array; mod proto_array_fork_choice; mod ssz_container; -pub use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice}; +pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; pub use error::Error; pub mod core { - pub use super::proto_array::ProtoArray; + pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; + pub use super::proto_array_fork_choice::VoteTracker; + pub use super::ssz_container::SszContainer; } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index b4d6dd9e0f..465ef9d4fc 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,13 +1,16 @@ -use crate::{error::Error, Block}; +use crate::error::InvalidBestNodeInfo; +use crate::{error::Error, Block, ExecutionStatus}; use serde_derive::{Deserialize, Serialize}; use ssz::four_byte_option_impl; +use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{AttestationShufflingId, Epoch, Hash256, Slot}; +use types::{AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union // selector. 
four_byte_option_impl!(four_byte_option_usize, usize); +four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); #[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)] pub struct ProtoNode { @@ -28,13 +31,33 @@ pub struct ProtoNode { pub root: Hash256, #[ssz(with = "four_byte_option_usize")] pub parent: Option, - pub justified_epoch: Epoch, - pub finalized_epoch: Epoch, - weight: u64, + #[ssz(with = "four_byte_option_checkpoint")] + pub justified_checkpoint: Option, + #[ssz(with = "four_byte_option_checkpoint")] + pub finalized_checkpoint: Option, + pub weight: u64, #[ssz(with = "four_byte_option_usize")] - best_child: Option, + pub best_child: Option, #[ssz(with = "four_byte_option_usize")] - best_descendant: Option, + pub best_descendant: Option, + /// Indicates if an execution node has marked this block as valid. Also contains the execution + /// block hash. + pub execution_status: ExecutionStatus, +} + +#[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] +pub struct ProposerBoost { + pub root: Hash256, + pub score: u64, +} + +impl Default for ProposerBoost { + fn default() -> Self { + Self { + root: Hash256::zero(), + score: 0, + } + } } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -42,10 +65,11 @@ pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes /// simply waste time. pub prune_threshold: usize, - pub justified_epoch: Epoch, - pub finalized_epoch: Epoch, + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, pub nodes: Vec, pub indices: HashMap, + pub previous_proposer_boost: ProposerBoost, } impl ProtoArray { @@ -62,11 +86,14 @@ impl ProtoArray { /// - Compare the current node with the parents best-child, updating it if the current node /// should become the best child. /// - If required, update the parents best-descendant with the current node or its best-descendant. 
- pub fn apply_score_changes( + pub fn apply_score_changes( &mut self, mut deltas: Vec, - justified_epoch: Epoch, - finalized_epoch: Epoch, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + new_balances: &[u64], + proposer_boost_root: Hash256, + spec: &ChainSpec, ) -> Result<(), Error> { if deltas.len() != self.indices.len() { return Err(Error::InvalidDeltaLen { @@ -75,11 +102,16 @@ impl ProtoArray { }); } - if justified_epoch != self.justified_epoch || finalized_epoch != self.finalized_epoch { - self.justified_epoch = justified_epoch; - self.finalized_epoch = finalized_epoch; + if justified_checkpoint != self.justified_checkpoint + || finalized_checkpoint != self.finalized_checkpoint + { + self.justified_checkpoint = justified_checkpoint; + self.finalized_checkpoint = finalized_checkpoint; } + // Default the proposer boost score to zero. + let mut proposer_score = 0; + // Iterate backwards through all indices in `self.nodes`. for node_index in (0..self.nodes.len()).rev() { let node = self @@ -94,11 +126,35 @@ impl ProtoArray { continue; } - let node_delta = deltas + let mut node_delta = deltas .get(node_index) .copied() .ok_or(Error::InvalidNodeDelta(node_index))?; + // If we find the node for which the proposer boost was previously applied, decrease + // the delta by the previous score amount. + if self.previous_proposer_boost.root != Hash256::zero() + && self.previous_proposer_boost.root == node.root + { + node_delta = node_delta + .checked_sub(self.previous_proposer_boost.score as i64) + .ok_or(Error::DeltaOverflow(node_index))?; + } + // If we find the node matching the current proposer boost root, increase + // the delta by the new score amount. 
+ // + // https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance + if let Some(proposer_score_boost) = spec.proposer_score_boost { + if proposer_boost_root != Hash256::zero() && proposer_boost_root == node.root { + proposer_score = + calculate_proposer_boost::(new_balances, proposer_score_boost) + .ok_or(Error::ProposerBoostOverflow(node_index))?; + node_delta = node_delta + .checked_add(proposer_score as i64) + .ok_or(Error::DeltaOverflow(node_index))?; + } + } + // Apply the delta to the node. if node_delta < 0 { // Note: I am conflicted about whether to use `saturating_sub` or `checked_sub` @@ -132,6 +188,12 @@ impl ProtoArray { } } + // After applying all deltas, update the `previous_proposer_boost`. + self.previous_proposer_boost = ProposerBoost { + root: proposer_boost_root, + score: proposer_score, + }; + // A second time, iterate backwards through all indices in `self.nodes`. // // We _must_ perform these functions separate from the weight-updating loop above to ensure @@ -173,11 +235,12 @@ impl ProtoArray { parent: block .parent_root .and_then(|parent| self.indices.get(&parent).copied()), - justified_epoch: block.justified_epoch, - finalized_epoch: block.finalized_epoch, + justified_checkpoint: Some(block.justified_checkpoint), + finalized_checkpoint: Some(block.finalized_checkpoint), weight: 0, best_child: None, best_descendant: None, + execution_status: block.execution_status, }; self.indices.insert(node.root, node_index); @@ -185,11 +248,58 @@ impl ProtoArray { if let Some(parent_index) = node.parent { self.maybe_update_best_child_and_descendant(parent_index, node_index)?; + + if matches!(block.execution_status, ExecutionStatus::Valid(_)) { + self.propagate_execution_payload_verification(parent_index)?; + } } Ok(()) } + pub fn propagate_execution_payload_verification( + &mut self, + verified_node_index: usize, + ) -> Result<(), Error> { + let mut index = verified_node_index; + loop { + let node = 
self + .nodes + .get_mut(index) + .ok_or(Error::InvalidNodeIndex(index))?; + let parent_index = match node.execution_status { + // We have reached a node that we already know is valid. No need to iterate further + // since we assume an ancestors have already been set to valid. + ExecutionStatus::Valid(_) => return Ok(()), + // We have reached an irrelevant node, this node is prior to a terminal execution + // block. There's no need to iterate further, it's impossible for this block to have + // any relevant ancestors. + ExecutionStatus::Irrelevant(_) => return Ok(()), + // The block has an unknown status, set it to valid since any ancestor of a valid + // payload can be considered valid. + ExecutionStatus::Unknown(payload_block_hash) => { + node.execution_status = ExecutionStatus::Valid(payload_block_hash); + if let Some(parent_index) = node.parent { + parent_index + } else { + // We have reached the root block, iteration complete. + return Ok(()); + } + } + // An ancestor of the valid payload was invalid. This is a serious error which + // indicates a consensus failure in the execution node. This is unrecoverable. + ExecutionStatus::Invalid(ancestor_payload_block_hash) => { + return Err(Error::InvalidAncestorOfValidPayload { + ancestor_block_root: node.root, + ancestor_payload_block_hash, + }) + } + }; + + index = parent_index; + } + } + /// Follows the best-descendant links to find the best-block (i.e., head-block). /// /// ## Notes @@ -219,14 +329,14 @@ impl ProtoArray { // Perform a sanity check that the node is indeed valid to be the head. 
if !self.node_is_viable_for_head(best_node) { - return Err(Error::InvalidBestNode { + return Err(Error::InvalidBestNode(Box::new(InvalidBestNodeInfo { start_root: *justified_root, - justified_epoch: self.justified_epoch, - finalized_epoch: self.finalized_epoch, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, head_root: justified_node.root, - head_justified_epoch: justified_node.justified_epoch, - head_finalized_epoch: justified_node.finalized_epoch, - }); + head_justified_checkpoint: justified_node.justified_checkpoint, + head_finalized_checkpoint: justified_node.finalized_checkpoint, + }))); } Ok(best_node.root) @@ -427,9 +537,16 @@ impl ProtoArray { /// Any node that has a different finalized or justified epoch should not be viable for the /// head. fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool { - (node.justified_epoch == self.justified_epoch || self.justified_epoch == Epoch::new(0)) - && (node.finalized_epoch == self.finalized_epoch - || self.finalized_epoch == Epoch::new(0)) + if let (Some(node_justified_checkpoint), Some(node_finalized_checkpoint)) = + (node.justified_checkpoint, node.finalized_checkpoint) + { + (node_justified_checkpoint == self.justified_checkpoint + || self.justified_checkpoint.epoch == Epoch::new(0)) + && (node_finalized_checkpoint == self.finalized_checkpoint + || self.finalized_checkpoint.epoch == Epoch::new(0)) + } else { + false + } } /// Return a reverse iterator over the nodes which comprise the chain ending at `block_root`. @@ -453,6 +570,38 @@ impl ProtoArray { } } +/// A helper method to calculate the proposer boost based on the given `validator_balances`. +/// This does *not* do any verification about whether a boost should or should not be applied. +/// The `validator_balances` array used here is assumed to be structured like the one stored in +/// the `BalancesCache`, where *effective* balances are stored and inactive balances are defaulted +/// to zero. 
+/// +/// Returns `None` if there is an overflow or underflow when calculating the score. +/// +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance +fn calculate_proposer_boost( + validator_balances: &[u64], + proposer_score_boost: u64, +) -> Option { + let mut total_balance: u64 = 0; + let mut num_validators: u64 = 0; + for &balance in validator_balances { + // We need to filter zero balances here to get an accurate active validator count. + // This is because we default inactive validator balances to zero when creating + // this balances array. + if balance != 0 { + total_balance = total_balance.checked_add(balance)?; + num_validators = num_validators.checked_add(1)?; + } + } + let average_balance = total_balance.checked_div(num_validators)?; + let committee_size = num_validators.checked_div(E::slots_per_epoch())?; + let committee_weight = committee_size.checked_mul(average_balance)?; + committee_weight + .checked_mul(proposer_score_boost)? + .checked_div(100) +} + /// Reverse iterator over one path through a `ProtoArray`. 
pub struct Iter<'a> { next_node_index: Option, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 36bdab2dbe..891eafabe9 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,10 +1,11 @@ use crate::error::Error; -use crate::proto_array::ProtoArray; +use crate::proto_array::{ProposerBoost, ProtoArray}; use crate::ssz_container::SszContainer; +use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{AttestationShufflingId, Epoch, Hash256, Slot}; +use types::{AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -15,6 +16,41 @@ pub struct VoteTracker { next_epoch: Epoch, } +/// Represents the verification status of an execution payload. +#[derive(Clone, Copy, Debug, PartialEq, Encode, Decode, Serialize, Deserialize)] +#[ssz(enum_behaviour = "union")] +pub enum ExecutionStatus { + /// An EL has determined that the payload is valid. + Valid(Hash256), + /// An EL has determined that the payload is invalid. + Invalid(Hash256), + /// An EL has not yet verified the execution payload. + Unknown(Hash256), + /// The block is either prior to the merge fork, or after the merge fork but before the terminal + /// PoW block has been found. + /// + /// # Note: + /// + /// This `bool` only exists to satisfy our SSZ implementation which requires all variants + /// to have a value. It can be set to anything. + Irrelevant(bool), // TODO(merge): fix bool. 
+} + +impl ExecutionStatus { + pub fn irrelevant() -> Self { + ExecutionStatus::Irrelevant(false) + } + + pub fn block_hash(&self) -> Option { + match self { + ExecutionStatus::Valid(hash) + | ExecutionStatus::Invalid(hash) + | ExecutionStatus::Unknown(hash) => Some(*hash), + ExecutionStatus::Irrelevant(_) => None, + } + } +} + /// A block that is to be applied to the fork choice. /// /// A simplified version of `types::BeaconBlock`. @@ -27,8 +63,11 @@ pub struct Block { pub target_root: Hash256, pub current_epoch_shuffling_id: AttestationShufflingId, pub next_epoch_shuffling_id: AttestationShufflingId, - pub justified_epoch: Epoch, - pub finalized_epoch: Epoch, + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, + /// Indicates if an execution node has marked this block as valid. Also contains the execution + /// block hash. + pub execution_status: ExecutionStatus, } /// A Vec-wrapper which will grow to match any request. @@ -66,35 +105,38 @@ pub struct ProtoArrayForkChoice { } impl ProtoArrayForkChoice { + #[allow(clippy::too_many_arguments)] pub fn new( finalized_block_slot: Slot, finalized_block_state_root: Hash256, - justified_epoch: Epoch, - finalized_epoch: Epoch, - finalized_root: Hash256, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, current_epoch_shuffling_id: AttestationShufflingId, next_epoch_shuffling_id: AttestationShufflingId, + execution_status: ExecutionStatus, ) -> Result { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, - justified_epoch, - finalized_epoch, + justified_checkpoint, + finalized_checkpoint, nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), + previous_proposer_boost: ProposerBoost::default(), }; let block = Block { slot: finalized_block_slot, - root: finalized_root, + root: finalized_checkpoint.root, parent_root: None, state_root: finalized_block_state_root, // We are using the finalized_root as the target_root, since it always lies on 
an // epoch boundary. - target_root: finalized_root, + target_root: finalized_checkpoint.root, current_epoch_shuffling_id, next_epoch_shuffling_id, - justified_epoch, - finalized_epoch, + justified_checkpoint, + finalized_checkpoint, + execution_status, }; proto_array @@ -134,12 +176,13 @@ impl ProtoArrayForkChoice { .map_err(|e| format!("process_block_error: {:?}", e)) } - pub fn find_head( + pub fn find_head( &mut self, - justified_epoch: Epoch, - justified_root: Hash256, - finalized_epoch: Epoch, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, justified_state_balances: &[u64], + proposer_boost_root: Hash256, + spec: &ChainSpec, ) -> Result { let old_balances = &mut self.balances; @@ -154,13 +197,20 @@ impl ProtoArrayForkChoice { .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?; self.proto_array - .apply_score_changes(deltas, justified_epoch, finalized_epoch) + .apply_score_changes::( + deltas, + justified_checkpoint, + finalized_checkpoint, + new_balances, + proposer_boost_root, + spec, + ) .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?; *old_balances = new_balances.to_vec(); self.proto_array - .find_head(&justified_root) + .find_head(&justified_checkpoint.root) .map_err(|e| format!("find_head failed: {:?}", e)) } @@ -194,17 +244,27 @@ impl ProtoArrayForkChoice { .and_then(|i| self.proto_array.nodes.get(i)) .map(|parent| parent.root); - Some(Block { - slot: block.slot, - root: block.root, - parent_root, - state_root: block.state_root, - target_root: block.target_root, - current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), - next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), - justified_epoch: block.justified_epoch, - finalized_epoch: block.finalized_epoch, - }) + // If a node does not have a `finalized_checkpoint` or `justified_checkpoint` populated, + // it means it is not a descendant of the finalized checkpoint, so it is valid to return + // `None` here. 
+ if let (Some(justified_checkpoint), Some(finalized_checkpoint)) = + (block.justified_checkpoint, block.finalized_checkpoint) + { + Some(Block { + slot: block.slot, + root: block.root, + parent_root, + state_root: block.state_root, + target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), + next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), + justified_checkpoint, + finalized_checkpoint, + execution_status: block.execution_status, + }) + } else { + None + } } /// Returns `true` if the `descendant_root` has an ancestor with `ancestor_root`. Always @@ -258,6 +318,13 @@ impl ProtoArrayForkChoice { pub fn core_proto_array(&self) -> &ProtoArray { &self.proto_array } + + /// Returns a mutable reference to the core `ProtoArray` struct. + /// + /// Should only be used during database schema migrations. + pub fn core_proto_array_mut(&mut self) -> &mut ProtoArray { + &mut self.proto_array + } } /// Returns a list of `deltas`, where there is one delta for each of the indices in @@ -351,15 +418,21 @@ mod test_compute_deltas { let unknown = Hash256::from_low_u64_be(4); let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); + let execution_status = ExecutionStatus::irrelevant(); + + let genesis_checkpoint = Checkpoint { + epoch: genesis_epoch, + root: finalized_root, + }; let mut fc = ProtoArrayForkChoice::new( genesis_slot, state_root, - genesis_epoch, - genesis_epoch, - finalized_root, + genesis_checkpoint, + genesis_checkpoint, junk_shuffling_id.clone(), junk_shuffling_id.clone(), + execution_status, ) .unwrap(); @@ -373,8 +446,9 @@ mod test_compute_deltas { target_root: finalized_root, current_epoch_shuffling_id: junk_shuffling_id.clone(), next_epoch_shuffling_id: junk_shuffling_id.clone(), - justified_epoch: genesis_epoch, - finalized_epoch: genesis_epoch, + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, }) 
.unwrap(); @@ -388,8 +462,9 @@ mod test_compute_deltas { target_root: finalized_root, current_epoch_shuffling_id: junk_shuffling_id.clone(), next_epoch_shuffling_id: junk_shuffling_id, - justified_epoch: genesis_epoch, - finalized_epoch: genesis_epoch, + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, }) .unwrap(); diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index c79c433e39..7f7ef79fe8 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,20 +1,27 @@ +use crate::proto_array::ProposerBoost; use crate::{ proto_array::{ProtoArray, ProtoNode}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, }; +use ssz::{four_byte_option_impl, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{Epoch, Hash256}; +use types::{Checkpoint, Hash256}; + +// Define a "legacy" implementation of `Option` which uses four bytes for encoding the union +// selector. 
+four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); #[derive(Encode, Decode)] pub struct SszContainer { - votes: Vec, - balances: Vec, - prune_threshold: usize, - justified_epoch: Epoch, - finalized_epoch: Epoch, - nodes: Vec, - indices: Vec<(Hash256, usize)>, + pub votes: Vec, + pub balances: Vec, + pub prune_threshold: usize, + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, + pub nodes: Vec, + pub indices: Vec<(Hash256, usize)>, + pub previous_proposer_boost: ProposerBoost, } impl From<&ProtoArrayForkChoice> for SszContainer { @@ -25,10 +32,11 @@ impl From<&ProtoArrayForkChoice> for SszContainer { votes: from.votes.0.clone(), balances: from.balances.clone(), prune_threshold: proto_array.prune_threshold, - justified_epoch: proto_array.justified_epoch, - finalized_epoch: proto_array.finalized_epoch, + justified_checkpoint: proto_array.justified_checkpoint, + finalized_checkpoint: proto_array.finalized_checkpoint, nodes: proto_array.nodes.clone(), indices: proto_array.indices.iter().map(|(k, v)| (*k, *v)).collect(), + previous_proposer_boost: proto_array.previous_proposer_boost, } } } @@ -37,10 +45,11 @@ impl From for ProtoArrayForkChoice { fn from(from: SszContainer) -> Self { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, - justified_epoch: from.justified_epoch, - finalized_epoch: from.finalized_epoch, + justified_checkpoint: from.justified_checkpoint, + finalized_checkpoint: from.finalized_checkpoint, nodes: from.nodes, indices: from.indices.into_iter().collect::>(), + previous_proposer_boost: from.previous_proposer_boost, }; Self { diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml index 2cda517a6a..965a63c60d 100644 --- a/consensus/serde_utils/Cargo.toml +++ b/consensus/serde_utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_serde_utils" -version = "0.1.0" +version = "0.1.1" authors = ["Paul Hauner "] edition = "2018" description = "Serialization and 
deserialization utilities useful for JSON representations of Ethereum 2.0 types." @@ -10,6 +10,7 @@ license = "Apache-2.0" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" hex = "0.4.2" +ethereum-types = "0.12.1" [dev-dependencies] serde_json = "1.0.58" diff --git a/consensus/serde_utils/src/bytes_4_hex.rs b/consensus/serde_utils/src/bytes_4_hex.rs deleted file mode 100644 index e057d1a128..0000000000 --- a/consensus/serde_utils/src/bytes_4_hex.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! Formats `[u8; 4]` as a 0x-prefixed hex string. -//! -//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`. - -use crate::hex::PrefixedHexVisitor; -use serde::de::Error; -use serde::{Deserializer, Serializer}; - -const BYTES_LEN: usize = 4; - -pub fn serialize(bytes: &[u8; BYTES_LEN], serializer: S) -> Result -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_str(PrefixedHexVisitor)?; - - if decoded.len() != BYTES_LEN { - return Err(D::Error::custom(format!( - "expected {} bytes for array, got {}", - BYTES_LEN, - decoded.len() - ))); - } - - let mut array = [0; BYTES_LEN]; - array.copy_from_slice(&decoded); - Ok(array) -} diff --git a/consensus/serde_utils/src/fixed_bytes_hex.rs b/consensus/serde_utils/src/fixed_bytes_hex.rs new file mode 100644 index 0000000000..4e9dc98aca --- /dev/null +++ b/consensus/serde_utils/src/fixed_bytes_hex.rs @@ -0,0 +1,52 @@ +//! Formats `[u8; n]` as a 0x-prefixed hex string. +//! +//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`. + +use crate::hex::PrefixedHexVisitor; +use serde::de::Error; +use serde::{Deserializer, Serializer}; + +macro_rules! 
bytes_hex { + ($num_bytes: tt) => { + use super::*; + + const BYTES_LEN: usize = $num_bytes; + + pub fn serialize(bytes: &[u8; BYTES_LEN], serializer: S) -> Result + where + S: Serializer, + { + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> + where + D: Deserializer<'de>, + { + let decoded = deserializer.deserialize_str(PrefixedHexVisitor)?; + + if decoded.len() != BYTES_LEN { + return Err(D::Error::custom(format!( + "expected {} bytes for array, got {}", + BYTES_LEN, + decoded.len() + ))); + } + + let mut array = [0; BYTES_LEN]; + array.copy_from_slice(&decoded); + Ok(array) + } + }; +} + +pub mod bytes_4_hex { + bytes_hex!(4); +} + +pub mod bytes_8_hex { + bytes_hex!(8); +} diff --git a/consensus/serde_utils/src/hex.rs b/consensus/serde_utils/src/hex.rs index 647b0ecfb5..1e6c02427f 100644 --- a/consensus/serde_utils/src/hex.rs +++ b/consensus/serde_utils/src/hex.rs @@ -6,6 +6,7 @@ use std::fmt; /// Encode `data` as a 0x-prefixed hex string. pub fn encode>(data: T) -> String { let hex = hex::encode(data); + let mut s = "0x".to_string(); s.push_str(hex.as_str()); s @@ -33,12 +34,7 @@ impl<'de> Visitor<'de> for PrefixedHexVisitor { where E: de::Error, { - if let Some(stripped) = value.strip_prefix("0x") { - Ok(hex::decode(stripped) - .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))?) - } else { - Err(de::Error::custom("missing 0x prefix")) - } + decode(value).map_err(de::Error::custom) } } diff --git a/consensus/serde_utils/src/hex_vec.rs b/consensus/serde_utils/src/hex_vec.rs new file mode 100644 index 0000000000..60d6494434 --- /dev/null +++ b/consensus/serde_utils/src/hex_vec.rs @@ -0,0 +1,23 @@ +//! Formats `Vec` as a 0x-prefixed hex string. +//! +//! E.g., `vec![0, 1, 2, 3]` serializes as `"0x00010203"`. 
+ +use crate::hex::PrefixedHexVisitor; +use serde::{Deserializer, Serializer}; + +pub fn serialize(bytes: &[u8], serializer: S) -> Result +where + S: Serializer, +{ + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + deserializer.deserialize_str(PrefixedHexVisitor) +} diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 0016e67a3d..87179997e3 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -1,9 +1,13 @@ mod quoted_int; -pub mod bytes_4_hex; +pub mod fixed_bytes_hex; pub mod hex; +pub mod hex_vec; +pub mod list_of_bytes_lists; pub mod quoted_u64_vec; pub mod u32_hex; +pub mod u64_hex_be; pub mod u8_hex; -pub use quoted_int::{quoted_u32, quoted_u64, quoted_u8}; +pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex}; +pub use quoted_int::{quoted_u256, quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/list_of_bytes_lists.rs b/consensus/serde_utils/src/list_of_bytes_lists.rs new file mode 100644 index 0000000000..b93321aa06 --- /dev/null +++ b/consensus/serde_utils/src/list_of_bytes_lists.rs @@ -0,0 +1,49 @@ +//! Formats `Vec` using quotes. +//! +//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. +//! +//! Quotes can be optional during decoding. + +use crate::hex; +use serde::ser::SerializeSeq; +use serde::{de, Deserializer, Serializer}; + +pub struct ListOfBytesListVisitor; +impl<'a> serde::de::Visitor<'a> for ListOfBytesListVisitor { + type Value = Vec>; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a list of 0x-prefixed byte lists") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'a>, + { + let mut vec = vec![]; + + while let Some(val) = seq.next_element::()? 
{ + vec.push(hex::decode(&val).map_err(de::Error::custom)?); + } + + Ok(vec) + } +} + +pub fn serialize(value: &[Vec], serializer: S) -> Result +where + S: Serializer, +{ + let mut seq = serializer.serialize_seq(Some(value.len()))?; + for val in value { + seq.serialize_element(&hex::encode(val))?; + } + seq.end() +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + deserializer.deserialize_any(ListOfBytesListVisitor) +} diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs index 5c3fa0f0aa..822acb5ee8 100644 --- a/consensus/serde_utils/src/quoted_int.rs +++ b/consensus/serde_utils/src/quoted_int.rs @@ -4,6 +4,7 @@ //! //! Quotes can be optional during decoding. +use ethereum_types::U256; use serde::{Deserializer, Serializer}; use serde_derive::{Deserialize, Serialize}; use std::convert::TryFrom; @@ -56,6 +57,17 @@ macro_rules! define_mod { } } + /// Compositional wrapper type that allows quotes or no quotes. + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] + #[serde(transparent)] + pub struct MaybeQuoted + where + T: From<$int> + Into<$int> + Copy + TryFrom, + { + #[serde(with = "self")] + pub value: T, + } + /// Wrapper type for requiring quotes on a `$int`-like type. /// /// Unlike using `serde(with = "quoted_$int::require_quotes")` this is composable, and can be nested @@ -70,17 +82,6 @@ macro_rules! define_mod { pub value: T, } - /// Compositional wrapper type that allows quotes or no quotes. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] - #[serde(transparent)] - pub struct MaybeQuoted - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - #[serde(with = "self")] - pub value: T, - } - /// Serialize with quotes. 
pub fn serialize(value: &T, serializer: S) -> Result where @@ -153,3 +154,66 @@ pub mod quoted_u64 { define_mod!(u64, visit_u64); } + +pub mod quoted_u256 { + use super::*; + + struct U256Visitor; + + impl<'de> serde::de::Visitor<'de> for U256Visitor { + type Value = U256; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a quoted U256 integer") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + U256::from_dec_str(v).map_err(serde::de::Error::custom) + } + } + + /// Serialize with quotes. + pub fn serialize(value: &U256, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&format!("{}", value)) + } + + /// Deserialize with quotes. + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_str(U256Visitor) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + #[serde(transparent)] + struct WrappedU256(#[serde(with = "quoted_u256")] U256); + + #[test] + fn u256_with_quotes() { + assert_eq!( + &serde_json::to_string(&WrappedU256(U256::one())).unwrap(), + "\"1\"" + ); + assert_eq!( + serde_json::from_str::("\"1\"").unwrap(), + WrappedU256(U256::one()) + ); + } + + #[test] + fn u256_without_quotes() { + serde_json::from_str::("1").unwrap_err(); + } +} diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs new file mode 100644 index 0000000000..145292f8c3 --- /dev/null +++ b/consensus/serde_utils/src/u64_hex_be.rs @@ -0,0 +1,134 @@ +//! Formats `u64` as a 0x-prefixed, big-endian hex string. +//! +//! E.g., `0` serializes as `"0x0000000000000000"`. 
+ +use serde::de::{self, Error, Visitor}; +use serde::{Deserializer, Serializer}; +use std::fmt; + +const BYTES_LEN: usize = 8; + +pub struct QuantityVisitor; +impl<'de> Visitor<'de> for QuantityVisitor { + type Value = Vec; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a hex string") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + if !value.starts_with("0x") { + return Err(de::Error::custom("must start with 0x")); + } + + let stripped = value.trim_start_matches("0x"); + + if stripped.is_empty() { + Err(de::Error::custom(format!( + "quantity cannot be {}", + stripped + ))) + } else if stripped == "0" { + Ok(vec![0]) + } else if stripped.starts_with('0') { + Err(de::Error::custom("cannot have leading zero")) + } else if stripped.len() % 2 != 0 { + hex::decode(&format!("0{}", stripped)) + .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) + } else { + hex::decode(&stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) + } + } +} + +pub fn serialize(num: &u64, serializer: S) -> Result +where + S: Serializer, +{ + let raw = hex::encode(num.to_be_bytes()); + let trimmed = raw.trim_start_matches('0'); + + let hex = if trimmed.is_empty() { "0" } else { &trimmed }; + + serializer.serialize_str(&format!("0x{}", &hex)) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let decoded = deserializer.deserialize_str(QuantityVisitor)?; + + // TODO: this is not strict about byte length like other methods. 
+ if decoded.len() > BYTES_LEN { + return Err(D::Error::custom(format!( + "expected max {} bytes for array, got {}", + BYTES_LEN, + decoded.len() + ))); + } + + let mut array = [0; BYTES_LEN]; + array[BYTES_LEN - decoded.len()..].copy_from_slice(&decoded); + Ok(u64::from_be_bytes(array)) +} + +#[cfg(test)] +mod test { + use serde::{Deserialize, Serialize}; + use serde_json; + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + #[serde(transparent)] + struct Wrapper { + #[serde(with = "super")] + val: u64, + } + + #[test] + fn encoding() { + assert_eq!( + &serde_json::to_string(&Wrapper { val: 0 }).unwrap(), + "\"0x0\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 1 }).unwrap(), + "\"0x1\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 256 }).unwrap(), + "\"0x100\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 65 }).unwrap(), + "\"0x41\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 1024 }).unwrap(), + "\"0x400\"" + ); + } + + #[test] + fn decoding() { + assert_eq!( + serde_json::from_str::("\"0x0\"").unwrap(), + Wrapper { val: 0 }, + ); + assert_eq!( + serde_json::from_str::("\"0x41\"").unwrap(), + Wrapper { val: 65 }, + ); + assert_eq!( + serde_json::from_str::("\"0x400\"").unwrap(), + Wrapper { val: 1024 }, + ); + serde_json::from_str::("\"0x\"").unwrap_err(); + serde_json::from_str::("\"0x0400\"").unwrap_err(); + serde_json::from_str::("\"400\"").unwrap_err(); + serde_json::from_str::("\"ff\"").unwrap_err(); + } +} diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml index 853fd7232c..555017daae 100644 --- a/consensus/ssz/Cargo.toml +++ b/consensus/ssz/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_ssz" -version = "0.4.0" +version = "0.4.1" authors = ["Paul Hauner "] edition = "2018" description = "SimpleSerialize (SSZ) as used in Ethereum 2.0" diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index 29b2aec8e4..0e6b390830 100644 --- 
a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -1,6 +1,6 @@ use super::*; use core::num::NonZeroUsize; -use ethereum_types::{H256, U128, U256}; +use ethereum_types::{H160, H256, U128, U256}; use smallvec::SmallVec; use std::sync::Arc; @@ -256,6 +256,27 @@ impl Decode for Arc { } } +impl Decode for H160 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 20 + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let expected = ::ssz_fixed_len(); + + if len != expected { + Err(DecodeError::InvalidByteLength { len, expected }) + } else { + Ok(Self::from_slice(bytes)) + } + } +} + impl Decode for H256 { fn is_ssz_fixed_len() -> bool { true diff --git a/consensus/ssz/src/encode.rs b/consensus/ssz/src/encode.rs index cecd615a86..a46ef80e05 100644 --- a/consensus/ssz/src/encode.rs +++ b/consensus/ssz/src/encode.rs @@ -113,7 +113,7 @@ impl<'a> SszEncoder<'a> { F: Fn(&mut Vec), { if is_ssz_fixed_len { - ssz_append(&mut self.buf); + ssz_append(self.buf); } else { self.buf .extend_from_slice(&encode_length(self.offset + self.variable_bytes.len())); @@ -129,7 +129,7 @@ impl<'a> SszEncoder<'a> { pub fn finalize(&mut self) -> &mut Vec { self.buf.append(&mut self.variable_bytes); - &mut self.buf + self.buf } } diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index 00d3e0a3a0..5728685d01 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -1,6 +1,6 @@ use super::*; use core::num::NonZeroUsize; -use ethereum_types::{H256, U128, U256}; +use ethereum_types::{H160, H256, U128, U256}; use smallvec::SmallVec; use std::sync::Arc; @@ -305,6 +305,24 @@ impl Encode for NonZeroUsize { } } +impl Encode for H160 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 20 + } + + fn ssz_bytes_len(&self) -> usize { + 20 + } + + fn ssz_append(&self, buf: &mut Vec) { + buf.extend_from_slice(self.as_bytes()); + } +} + 
impl Encode for H256 { fn is_ssz_fixed_len() -> bool { true diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml index 2b6de52dd1..b71de4ccdb 100644 --- a/consensus/ssz_types/Cargo.toml +++ b/consensus/ssz_types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_ssz_types" -version = "0.2.1" +version = "0.2.2" authors = ["Paul Hauner "] edition = "2018" description = "Provides types with unique properties required for SSZ serialization and Merklization." @@ -10,13 +10,14 @@ license = "Apache-2.0" name = "ssz_types" [dependencies] -tree_hash = "0.4.0" +tree_hash = "0.4.1" serde = "1.0.116" serde_derive = "1.0.116" -eth2_serde_utils = "0.1.0" -eth2_ssz = "0.4.0" +eth2_serde_utils = "0.1.1" +eth2_ssz = "0.4.1" typenum = "1.12.0" arbitrary = { version = "1.0", features = ["derive"], optional = true } +derivative = "2.1.1" [dev-dependencies] serde_json = "1.0.58" diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index afecd8ce7d..dfad3aedcb 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -1,6 +1,7 @@ use crate::tree_hash::bitfield_bytes_tree_hash_root; use crate::Error; use core::marker::PhantomData; +use derivative::Derivative; use eth2_serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; @@ -87,7 +88,8 @@ pub type BitVector = Bitfield>; /// The internal representation of the bitfield is the same as that required by SSZ. The lowest /// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest /// bit-index. E.g., `vec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. 
-#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq, Hash(bound = ""))] pub struct Bitfield { bytes: Vec, len: usize, diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index 8b8d660fb9..ca5d40f14f 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -1,5 +1,6 @@ use crate::tree_hash::vec_tree_hash_root; use crate::Error; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; use std::ops::{Deref, DerefMut, Index, IndexMut}; @@ -44,7 +45,8 @@ pub use typenum; /// let long: FixedVector<_, typenum::U5> = FixedVector::from(base); /// assert_eq!(&long[..], &[1, 2, 3, 4, 0]); /// ``` -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: std::hash::Hash"))] #[serde(transparent)] pub struct FixedVector { vec: Vec, diff --git a/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs new file mode 100644 index 0000000000..86077891bc --- /dev/null +++ b/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs @@ -0,0 +1,22 @@ +use crate::FixedVector; +use eth2_serde_utils::hex::{self, PrefixedHexVisitor}; +use serde::{Deserializer, Serializer}; +use typenum::Unsigned; + +pub fn serialize(bytes: &FixedVector, serializer: S) -> Result +where + S: Serializer, + U: Unsigned, +{ + serializer.serialize_str(&hex::encode(&bytes[..])) +} + +pub fn deserialize<'de, D, U>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, + U: Unsigned, +{ + let vec = deserializer.deserialize_string(PrefixedHexVisitor)?; + FixedVector::new(vec) + .map_err(|e| serde::de::Error::custom(format!("invalid fixed vector: {:?}", e))) +} diff --git a/consensus/ssz_types/src/serde_utils/hex_var_list.rs b/consensus/ssz_types/src/serde_utils/hex_var_list.rs 
new file mode 100644 index 0000000000..e3a3a14e06 --- /dev/null +++ b/consensus/ssz_types/src/serde_utils/hex_var_list.rs @@ -0,0 +1,23 @@ +//! Serialize `VariableList` as 0x-prefixed hex string. +use crate::VariableList; +use eth2_serde_utils::hex::{self, PrefixedHexVisitor}; +use serde::{Deserializer, Serializer}; +use typenum::Unsigned; + +pub fn serialize(bytes: &VariableList, serializer: S) -> Result +where + S: Serializer, + N: Unsigned, +{ + serializer.serialize_str(&hex::encode(&**bytes)) +} + +pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, + N: Unsigned, +{ + let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; + VariableList::new(bytes) + .map_err(|e| serde::de::Error::custom(format!("invalid variable list: {:?}", e))) +} diff --git a/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs b/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs new file mode 100644 index 0000000000..e2fd8ddf32 --- /dev/null +++ b/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs @@ -0,0 +1,77 @@ +//! Serialize `VaraibleList, N>` as list of 0x-prefixed hex string. 
+use crate::VariableList; +use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer}; +use std::marker::PhantomData; +use typenum::Unsigned; + +#[derive(Deserialize)] +#[serde(transparent)] +pub struct WrappedListOwned( + #[serde(with = "crate::serde_utils::hex_var_list")] VariableList, +); + +#[derive(Serialize)] +#[serde(transparent)] +pub struct WrappedListRef<'a, N: Unsigned>( + #[serde(with = "crate::serde_utils::hex_var_list")] &'a VariableList, +); + +pub fn serialize( + list: &VariableList, N>, + serializer: S, +) -> Result +where + S: Serializer, + M: Unsigned, + N: Unsigned, +{ + let mut seq = serializer.serialize_seq(Some(list.len()))?; + for bytes in list { + seq.serialize_element(&WrappedListRef(bytes))?; + } + seq.end() +} + +#[derive(Default)] +pub struct Visitor { + _phantom_m: PhantomData, + _phantom_n: PhantomData, +} + +impl<'a, M, N> serde::de::Visitor<'a> for Visitor +where + M: Unsigned, + N: Unsigned, +{ + type Value = VariableList, N>; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a list of 0x-prefixed hex bytes") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'a>, + { + let mut list: VariableList, N> = <_>::default(); + + while let Some(val) = seq.next_element::>()? 
{ + list.push(val.0).map_err(|e| { + serde::de::Error::custom(format!("failed to push value to list: {:?}.", e)) + })?; + } + + Ok(list) + } +} + +pub fn deserialize<'de, D, M, N>( + deserializer: D, +) -> Result, N>, D::Error> +where + D: Deserializer<'de>, + M: Unsigned, + N: Unsigned, +{ + deserializer.deserialize_seq(Visitor::default()) +} diff --git a/consensus/ssz_types/src/serde_utils/mod.rs b/consensus/ssz_types/src/serde_utils/mod.rs index 2d315a0509..cd6d49cc85 100644 --- a/consensus/ssz_types/src/serde_utils/mod.rs +++ b/consensus/ssz_types/src/serde_utils/mod.rs @@ -1,2 +1,5 @@ +pub mod hex_fixed_vec; +pub mod hex_var_list; +pub mod list_of_hex_var_list; pub mod quoted_u64_fixed_vec; pub mod quoted_u64_var_list; diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 242a55b2c9..1414d12c8c 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -1,5 +1,6 @@ use crate::tree_hash::vec_tree_hash_root; use crate::Error; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; use std::ops::{Deref, DerefMut, Index, IndexMut}; @@ -46,7 +47,8 @@ pub use typenum; /// // Push a value to if it _does_ exceed the maximum. 
/// assert!(long.push(6).is_err()); /// ``` -#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] #[serde(transparent)] pub struct VariableList { vec: Vec, diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 755de44fac..8661751e10 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -12,11 +12,11 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" } bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" itertools = "0.10.0" -eth2_ssz = "0.4.0" -eth2_ssz_types = "0.2.1" +eth2_ssz = "0.4.1" +eth2_ssz_types = "0.2.2" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } -tree_hash = "0.4.0" +tree_hash = "0.4.1" types = { path = "../types", default-features = false } rayon = "1.4.1" eth2_hashing = "0.2.0" diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs new file mode 100644 index 0000000000..937348263b --- /dev/null +++ b/consensus/state_processing/src/block_replayer.rs @@ -0,0 +1,313 @@ +use crate::{ + per_block_processing, per_epoch_processing::EpochProcessingSummary, per_slot_processing, + BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, VerifyBlockRoot, +}; +use std::marker::PhantomData; +use types::{BeaconState, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; + +type PreBlockHook<'a, E, Error> = + Box, &SignedBeaconBlock) -> Result<(), Error> + 'a>; +type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>; +type PreSlotHook<'a, E, Error> = Box) -> Result<(), Error> + 'a>; +type PostSlotHook<'a, E, Error> = Box< + dyn FnMut(&mut BeaconState, Option>, bool) -> Result<(), Error> + + 'a, +>; +type StateRootIterDefault = std::iter::Empty>; + +/// Efficiently apply blocks to a state while configuring 
various parameters. +/// +/// Usage follows a builder pattern. +pub struct BlockReplayer< + 'a, + Spec: EthSpec, + Error = BlockReplayError, + StateRootIter = StateRootIterDefault, +> { + state: BeaconState, + spec: &'a ChainSpec, + state_root_strategy: StateRootStrategy, + block_sig_strategy: BlockSignatureStrategy, + verify_block_root: Option, + pre_block_hook: Option>, + post_block_hook: Option>, + pre_slot_hook: Option>, + post_slot_hook: Option>, + state_root_iter: Option, + state_root_miss: bool, + _phantom: PhantomData, +} + +#[derive(Debug)] +pub enum BlockReplayError { + NoBlocks, + SlotProcessing(SlotProcessingError), + BlockProcessing(BlockProcessingError), +} + +impl From for BlockReplayError { + fn from(e: SlotProcessingError) -> Self { + Self::SlotProcessing(e) + } +} + +impl From for BlockReplayError { + fn from(e: BlockProcessingError) -> Self { + Self::BlockProcessing(e) + } +} + +/// Defines how state roots should be computed during block replay. +#[derive(PartialEq)] +pub enum StateRootStrategy { + /// Perform all transitions faithfully to the specification. + Accurate, + /// Don't compute state roots, eventually computing an invalid beacon state that can only be + /// used for obtaining shuffling. + Inconsistent, +} + +impl<'a, E, Error, StateRootIter> BlockReplayer<'a, E, Error, StateRootIter> +where + E: EthSpec, + StateRootIter: Iterator>, + Error: From, +{ + /// Create a new replayer that will apply blocks upon `state`. 
+ /// + /// Defaults: + /// + /// - Full (bulk) signature verification + /// - Accurate state roots + /// - Full block root verification + pub fn new(state: BeaconState, spec: &'a ChainSpec) -> Self { + Self { + state, + spec, + state_root_strategy: StateRootStrategy::Accurate, + block_sig_strategy: BlockSignatureStrategy::VerifyBulk, + verify_block_root: Some(VerifyBlockRoot::True), + pre_block_hook: None, + post_block_hook: None, + pre_slot_hook: None, + post_slot_hook: None, + state_root_iter: None, + state_root_miss: false, + _phantom: PhantomData, + } + } + + /// Set the replayer's state root strategy different from the default. + pub fn state_root_strategy(mut self, state_root_strategy: StateRootStrategy) -> Self { + if state_root_strategy == StateRootStrategy::Inconsistent { + self.verify_block_root = None; + } + self.state_root_strategy = state_root_strategy; + self + } + + /// Set the replayer's block signature verification strategy. + pub fn block_signature_strategy(mut self, block_sig_strategy: BlockSignatureStrategy) -> Self { + self.block_sig_strategy = block_sig_strategy; + self + } + + /// Disable signature verification during replay. + /// + /// If you are truly _replaying_ blocks then you will almost certainly want to disable + /// signature checks for performance. + pub fn no_signature_verification(self) -> Self { + self.block_signature_strategy(BlockSignatureStrategy::NoVerification) + } + + /// Verify only the block roots of the initial few blocks, and trust the rest. + pub fn minimal_block_root_verification(mut self) -> Self { + self.verify_block_root = None; + self + } + + /// Supply a state root iterator to accelerate slot processing. + /// + /// If possible the state root iterator should return a state root for every slot from + /// `self.state.slot` to the `target_slot` supplied to `apply_blocks` (inclusive of both + /// endpoints). 
+ pub fn state_root_iter(mut self, iter: StateRootIter) -> Self { + self.state_root_iter = Some(iter); + self + } + + /// Run a function immediately before each block that is applied during `apply_blocks`. + /// + /// This can be used to inspect the state as blocks are applied. + pub fn pre_block_hook(mut self, hook: PreBlockHook<'a, E, Error>) -> Self { + self.pre_block_hook = Some(hook); + self + } + + /// Run a function immediately after each block that is applied during `apply_blocks`. + /// + /// This can be used to inspect the state as blocks are applied. + pub fn post_block_hook(mut self, hook: PostBlockHook<'a, E, Error>) -> Self { + self.post_block_hook = Some(hook); + self + } + + /// Run a function immediately before slot processing advances the state to the next slot. + pub fn pre_slot_hook(mut self, hook: PreSlotHook<'a, E, Error>) -> Self { + self.pre_slot_hook = Some(hook); + self + } + + /// Run a function immediately after slot processing has advanced the state to the next slot. + /// + /// The hook receives the state and a bool indicating if this state corresponds to a skipped + /// slot (i.e. it will not have a block applied). + pub fn post_slot_hook(mut self, hook: PostSlotHook<'a, E, Error>) -> Self { + self.post_slot_hook = Some(hook); + self + } + + /// Compute the state root for `slot` as efficiently as possible. + /// + /// The `blocks` should be the full list of blocks being applied and `i` should be the index of + /// the next block that will be applied, or `blocks.len()` if all blocks have already been + /// applied. + fn get_state_root( + &mut self, + slot: Slot, + blocks: &[SignedBeaconBlock], + i: usize, + ) -> Result, Error> { + // If we don't care about state roots then return immediately. + if self.state_root_strategy == StateRootStrategy::Inconsistent { + return Ok(Some(Hash256::zero())); + } + + // If a state root iterator is configured, use it to find the root. 
+ if let Some(ref mut state_root_iter) = self.state_root_iter { + let opt_root = state_root_iter + .take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot)) + .find(|res| res.as_ref().map_or(true, |(_, s)| *s == slot)) + .transpose()?; + + if let Some((root, _)) = opt_root { + return Ok(Some(root)); + } + } + + // Otherwise try to source a root from the previous block. + if let Some(prev_i) = i.checked_sub(1) { + if let Some(prev_block) = blocks.get(prev_i) { + if prev_block.slot() == slot { + return Ok(Some(prev_block.state_root())); + } + } + } + + self.state_root_miss = true; + Ok(None) + } + + /// Apply `blocks` atop `self.state`, taking care of slot processing. + /// + /// If `target_slot` is provided then the state will be advanced through to `target_slot` + /// after the blocks have been applied. + pub fn apply_blocks( + mut self, + blocks: Vec>, + target_slot: Option, + ) -> Result { + for (i, block) in blocks.iter().enumerate() { + // Allow one additional block at the start which is only used for its state root. + if i == 0 && block.slot() <= self.state.slot() { + continue; + } + + while self.state.slot() < block.slot() { + if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { + pre_slot_hook(&mut self.state)?; + } + + let state_root = self.get_state_root(self.state.slot(), &blocks, i)?; + let summary = per_slot_processing(&mut self.state, state_root, self.spec) + .map_err(BlockReplayError::from)?; + + if let Some(ref mut post_slot_hook) = self.post_slot_hook { + let is_skipped_slot = self.state.slot() < block.slot(); + post_slot_hook(&mut self.state, summary, is_skipped_slot)?; + } + } + + if let Some(ref mut pre_block_hook) = self.pre_block_hook { + pre_block_hook(&mut self.state, block)?; + } + + let verify_block_root = self.verify_block_root.unwrap_or_else(|| { + // If no explicit policy is set, verify only the first 1 or 2 block roots if using + // accurate state roots. Inaccurate state roots require block root verification to + // be off. 
+ if i <= 1 && self.state_root_strategy == StateRootStrategy::Accurate { + VerifyBlockRoot::True + } else { + VerifyBlockRoot::False + } + }); + per_block_processing( + &mut self.state, + block, + None, + self.block_sig_strategy, + verify_block_root, + self.spec, + ) + .map_err(BlockReplayError::from)?; + + if let Some(ref mut post_block_hook) = self.post_block_hook { + post_block_hook(&mut self.state, block)?; + } + } + + if let Some(target_slot) = target_slot { + while self.state.slot() < target_slot { + if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { + pre_slot_hook(&mut self.state)?; + } + + let state_root = self.get_state_root(self.state.slot(), &blocks, blocks.len())?; + let summary = per_slot_processing(&mut self.state, state_root, self.spec) + .map_err(BlockReplayError::from)?; + + if let Some(ref mut post_slot_hook) = self.post_slot_hook { + // No more blocks to apply (from our perspective) so we consider these slots + // skipped. + let is_skipped_slot = true; + post_slot_hook(&mut self.state, summary, is_skipped_slot)?; + } + } + } + + Ok(self) + } + + /// After block application, check if a state root miss occurred. + pub fn state_root_miss(&self) -> bool { + self.state_root_miss + } + + /// Convert the replayer into the state that was built. + pub fn into_state(self) -> BeaconState { + self.state + } +} + +impl<'a, E, Error> BlockReplayer<'a, E, Error, StateRootIterDefault> +where + E: EthSpec, + Error: From, +{ + /// If type inference fails to infer the state root iterator type you can use this method + /// to hint that no state root iterator is desired. 
+ pub fn no_state_root_iter(self) -> Self { + self + } +} diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 4867706730..5a03ea0322 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -34,14 +34,11 @@ pub fn slash_validator( .safe_add(validator_effective_balance)?, )?; - let min_slashing_penalty_quotient = match state { - BeaconState::Base(_) => spec.min_slashing_penalty_quotient, - BeaconState::Altair(_) => spec.min_slashing_penalty_quotient_altair, - }; decrease_balance( state, slashed_index, - validator_effective_balance.safe_div(min_slashing_penalty_quotient)?, + validator_effective_balance + .safe_div(spec.min_slashing_penalty_quotient_for_state(state))?, )?; // Apply proposer and whistleblower rewards @@ -51,7 +48,7 @@ pub fn slash_validator( validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?; let proposer_reward = match state { BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) => whistleblower_reward + BeaconState::Altair(_) | BeaconState::Merge(_) => whistleblower_reward .safe_mul(PROPOSER_WEIGHT)? 
.safe_div(WEIGHT_DENOMINATOR)?, }; diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 0b72bd5bb2..44c008276e 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -2,7 +2,7 @@ use super::per_block_processing::{ errors::BlockProcessingError, process_operations::process_deposit, }; use crate::common::DepositDataTree; -use crate::upgrade::upgrade_to_altair; +use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use safe_arith::{ArithError, SafeArith}; use tree_hash::TreeHash; use types::DEPOSIT_TREE_DEPTH; @@ -13,6 +13,7 @@ pub fn initialize_beacon_state_from_eth1( eth1_block_hash: Hash256, eth1_timestamp: u64, deposits: Vec, + execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, BlockProcessingError> { let genesis_time = eth2_genesis_time(eth1_timestamp, spec)?; @@ -46,12 +47,31 @@ pub fn initialize_beacon_state_from_eth1( // use of `BeaconBlock::empty` in `BeaconState::new` is sufficient to correctly initialise // the `latest_block_header` as per: // https://github.com/ethereum/eth2.0-specs/pull/2323 - if spec.fork_name_at_epoch(state.current_epoch()) == ForkName::Altair { + if spec + .altair_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { upgrade_to_altair(&mut state, spec)?; state.fork_mut().previous_version = spec.altair_fork_version; } + // Similarly, perform an upgrade to the merge if configured from genesis. + if spec + .bellatrix_fork_epoch + .map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch()) + { + upgrade_to_bellatrix(&mut state, spec)?; + + // Remove intermediate Altair fork from `state.fork`. + state.fork_mut().previous_version = spec.bellatrix_fork_version; + + // Override latest execution payload header. + // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing + *state.latest_execution_payload_header_mut()? 
= + execution_payload_header.unwrap_or_default(); + } + // Now that we have our validators, initialize the caches (including the committees) state.build_all_caches(spec)?; diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 18fee2e2c3..cb4ffee780 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -16,6 +16,7 @@ mod macros; mod metrics; +pub mod block_replayer; pub mod common; pub mod genesis; pub mod per_block_processing; @@ -25,13 +26,14 @@ pub mod state_advance; pub mod upgrade; pub mod verify_operation; +pub use block_replayer::{BlockReplayError, BlockReplayer, StateRootStrategy}; pub use genesis::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, process_activations, }; pub use per_block_processing::{ block_signature_verifier, errors::BlockProcessingError, per_block_processing, signature_sets, - BlockSignatureStrategy, BlockSignatureVerifier, VerifySignatures, + BlockSignatureStrategy, BlockSignatureVerifier, VerifyBlockRoot, VerifySignatures, }; pub use per_epoch_processing::{ errors::EpochProcessingError, process_epoch as per_epoch_processing, diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 845aee747b..857c776332 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -68,6 +68,14 @@ impl VerifySignatures { } } +/// Control verification of the latest block header. +#[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] +#[derive(PartialEq, Clone, Copy)] +pub enum VerifyBlockRoot { + True, + False, +} + /// Updates the state for a new block, whilst validating that the block is valid, optionally /// checking the block proposer signature. 
/// @@ -84,6 +92,7 @@ pub fn per_block_processing( signed_block: &SignedBeaconBlock, block_root: Option, block_signature_strategy: BlockSignatureStrategy, + verify_block_root: VerifyBlockRoot, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { let block = signed_block.message(); @@ -120,7 +129,7 @@ pub fn per_block_processing( BlockSignatureStrategy::VerifyRandao => VerifySignatures::False, }; - let proposer_index = process_block_header(state, block, spec)?; + let proposer_index = process_block_header(state, block, verify_block_root, spec)?; if verify_signatures.is_true() { verify_block_signature(state, signed_block, block_root, spec)?; @@ -135,14 +144,22 @@ pub fn per_block_processing( state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; + // The call to the `process_execution_payload` must happen before the call to the + // `process_randao` as the former depends on the `randao_mix` computed with the reveal of the + // previous block. 
+ if is_execution_enabled(state, block.body()) { + let payload = block.body().execution_payload()?; + process_execution_payload(state, payload, spec)?; + } + process_randao(state, block, verify_randao, spec)?; process_eth1_data(state, block.body().eth1_data())?; process_operations(state, block.body(), proposer_index, verify_signatures, spec)?; - if let BeaconBlockRef::Altair(inner) = block { + if let Ok(sync_aggregate) = block.body().sync_aggregate() { process_sync_aggregate( state, - &inner.body.sync_aggregate, + sync_aggregate, proposer_index, verify_signatures, spec, @@ -156,6 +173,7 @@ pub fn per_block_processing( pub fn process_block_header( state: &mut BeaconState, block: BeaconBlockRef<'_, T>, + verify_block_root: VerifyBlockRoot, spec: &ChainSpec, ) -> Result> { // Verify that the slots match @@ -184,14 +202,16 @@ pub fn process_block_header( } ); - let expected_previous_block_root = state.latest_block_header().tree_hash_root(); - verify!( - block.parent_root() == expected_previous_block_root, - HeaderInvalid::ParentBlockRootMismatch { - state: expected_previous_block_root, - block: block.parent_root(), - } - ); + if verify_block_root == VerifyBlockRoot::True { + let expected_previous_block_root = state.latest_block_header().tree_hash_root(); + verify!( + block.parent_root() == expected_previous_block_root, + HeaderInvalid::ParentBlockRootMismatch { + state: expected_previous_block_root, + block: block.parent_root(), + } + ); + } *state.latest_block_header_mut() = block.temporary_block_header(); @@ -283,3 +303,122 @@ pub fn get_new_eth1_data( Ok(None) } } + +/// Performs *partial* verification of the `payload`. +/// +/// The verification is partial, since the execution payload is not verified against an execution +/// engine. That is expected to be performed by an upstream function. 
+/// +/// ## Specification +/// +/// Contains a partial set of checks from the `process_execution_payload` function: +/// +/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload +pub fn partially_verify_execution_payload( + state: &BeaconState, + payload: &ExecutionPayload, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + if is_merge_transition_complete(state) { + block_verify!( + payload.parent_hash == state.latest_execution_payload_header()?.block_hash, + BlockProcessingError::ExecutionHashChainIncontiguous { + expected: state.latest_execution_payload_header()?.block_hash, + found: payload.parent_hash, + } + ); + } + block_verify!( + payload.random == *state.get_randao_mix(state.current_epoch())?, + BlockProcessingError::ExecutionRandaoMismatch { + expected: *state.get_randao_mix(state.current_epoch())?, + found: payload.random, + } + ); + + let timestamp = compute_timestamp_at_slot(state, spec)?; + block_verify!( + payload.timestamp == timestamp, + BlockProcessingError::ExecutionInvalidTimestamp { + expected: timestamp, + found: payload.timestamp, + } + ); + + Ok(()) +} + +/// Calls `partially_verify_execution_payload` and then updates the payload header in the `state`. +/// +/// ## Specification +/// +/// Partially equivalent to the `process_execution_payload` function: +/// +/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload +pub fn process_execution_payload( + state: &mut BeaconState, + payload: &ExecutionPayload, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + partially_verify_execution_payload(state, payload, spec)?; + + *state.latest_execution_payload_header_mut()? 
= ExecutionPayloadHeader { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipt_root: payload.receipt_root, + logs_bloom: payload.logs_bloom.clone(), + random: payload.random, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + }; + + Ok(()) +} + +/// These functions will definitely be called before the merge. Their entire purpose is to check if +/// the merge has happened or if we're on the transition block. Thus we don't want to propagate +/// errors from the `BeaconState` being an earlier variant than `BeaconStateMerge` as we'd have to +/// repeaetedly write code to treat these errors as false. +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_merge_transition_complete +pub fn is_merge_transition_complete(state: &BeaconState) -> bool { + state + .latest_execution_payload_header() + .map(|header| *header != >::default()) + .unwrap_or(false) +} +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_merge_transition_block +pub fn is_merge_transition_block( + state: &BeaconState, + body: BeaconBlockBodyRef, +) -> bool { + body.execution_payload() + .map(|payload| { + !is_merge_transition_complete(state) && *payload != >::default() + }) + .unwrap_or(false) +} +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_execution_enabled +pub fn is_execution_enabled( + state: &BeaconState, + body: BeaconBlockBodyRef, +) -> bool { + is_merge_transition_block(state, body) || is_merge_transition_complete(state) +} + +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#compute_timestamp_at_slot +pub fn 
compute_timestamp_at_slot( + state: &BeaconState, + spec: &ChainSpec, +) -> Result { + let slots_since_genesis = state.slot().as_u64().safe_sub(spec.genesis_slot.as_u64())?; + slots_since_genesis + .safe_mul(spec.seconds_per_slot) + .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) +} diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 3e7a799341..28044a462c 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -302,7 +302,7 @@ where /// Include the signature of the block's sync aggregate (if it exists) for verification. pub fn include_sync_aggregate(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { - if let Some(sync_aggregate) = block.message().body().sync_aggregate() { + if let Ok(sync_aggregate) = block.message().body().sync_aggregate() { if let Some(signature_set) = sync_aggregate_signature_set( &self.decompressor, sync_aggregate, diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index c4bdaf3c7b..21c3024347 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -57,6 +57,19 @@ pub enum BlockProcessingError { ArithError(ArithError), InconsistentBlockFork(InconsistentFork), InconsistentStateFork(InconsistentFork), + ExecutionHashChainIncontiguous { + expected: Hash256, + found: Hash256, + }, + ExecutionRandaoMismatch { + expected: Hash256, + found: Hash256, + }, + ExecutionInvalidTimestamp { + expected: u64, + found: u64, + }, + ExecutionInvalid, #[cfg(feature = "milhouse")] MilhouseError(milhouse::Error), } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs 
b/consensus/state_processing/src/per_block_processing/process_operations.rs index f2cef47d6f..0cdf54a6c8 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -228,7 +228,7 @@ pub fn process_attestations<'a, T: EthSpec>( BeaconBlockBodyRef::Base(_) => { base::process_attestations(state, block_body.attestations(), verify_signatures, spec)?; } - BeaconBlockBodyRef::Altair(_) => { + BeaconBlockBodyRef::Altair(_) | BeaconBlockBodyRef::Merge(_) => { altair::process_attestations( state, block_body.attestations(), @@ -353,15 +353,15 @@ pub fn process_deposit( state.validators_mut().push(validator)?; state.balances_mut().push(deposit.data.amount)?; - // Altair-specific initializations. - if let BeaconState::Altair(altair_state) = state { - altair_state - .previous_epoch_participation - .push(ParticipationFlags::default())?; - altair_state - .current_epoch_participation - .push(ParticipationFlags::default())?; - altair_state.inactivity_scores.push(0)?; + // Altair or later initializations. 
+ if let Ok(previous_epoch_participation) = state.previous_epoch_participation_mut() { + previous_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(current_epoch_participation) = state.current_epoch_participation_mut() { + current_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(inactivity_scores) = state.inactivity_scores_mut() { + inactivity_scores.push(0)?; } } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 78c034caac..b75a79c72e 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -6,7 +6,10 @@ use crate::per_block_processing::errors::{ DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; -use crate::{per_block_processing::process_operations, BlockSignatureStrategy, VerifySignatures}; +use crate::{ + per_block_processing::process_operations, BlockSignatureStrategy, VerifyBlockRoot, + VerifySignatures, +}; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use lazy_static::lazy_static; use ssz_types::Bitfield; @@ -65,6 +68,7 @@ fn valid_block_ok() { &block, None, BlockSignatureStrategy::VerifyIndividual, + VerifyBlockRoot::True, &spec, ); @@ -88,6 +92,7 @@ fn invalid_block_header_state_slot() { &SignedBeaconBlock::from_block(block, signature), None, BlockSignatureStrategy::VerifyIndividual, + VerifyBlockRoot::True, &spec, ); @@ -116,6 +121,7 @@ fn invalid_parent_block_root() { &SignedBeaconBlock::from_block(block, signature), None, BlockSignatureStrategy::VerifyIndividual, + VerifyBlockRoot::True, &spec, ); @@ -145,6 +151,7 @@ fn invalid_block_signature() { &SignedBeaconBlock::from_block(block, Signature::empty()), None, BlockSignatureStrategy::VerifyIndividual, + VerifyBlockRoot::True, &spec, ); @@ -174,6 +181,7 @@ fn invalid_randao_reveal_signature() { 
&signed_block, None, BlockSignatureStrategy::VerifyIndividual, + VerifyBlockRoot::True, &spec, ); @@ -187,14 +195,13 @@ fn valid_4_deposits() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); let mut state = harness.get_current_state(); - let (deposits, mut state) = harness.make_deposits(&mut state, 4, None, None); + let (deposits, state) = harness.make_deposits(&mut state, 4, None, None); let deposits = VariableList::from(deposits); let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; *head_block.to_mut().body_mut().deposits_mut() = deposits; - let result = - process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); + let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); // Expecting Ok because these are valid deposits. assert_eq!(result, Ok(())); @@ -206,7 +213,7 @@ fn invalid_deposit_deposit_count_too_big() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); let mut state = harness.get_current_state(); - let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None); + let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; @@ -214,8 +221,7 @@ fn invalid_deposit_deposit_count_too_big() { let big_deposit_count = NUM_DEPOSITS + 1; state.eth1_data_mut().deposit_count = big_deposit_count; - let result = - process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); + let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); // Expecting DepositCountInvalid because we incremented the deposit_count assert_eq!( @@ -233,7 +239,7 @@ fn invalid_deposit_count_too_small() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); let mut state = harness.get_current_state(); - let (deposits, mut state) = harness.make_deposits(&mut state, 
1, None, None); + let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; @@ -241,8 +247,7 @@ fn invalid_deposit_count_too_small() { let small_deposit_count = NUM_DEPOSITS - 1; state.eth1_data_mut().deposit_count = small_deposit_count; - let result = - process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); + let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); // Expecting DepositCountInvalid because we decremented the deposit_count assert_eq!( @@ -260,7 +265,7 @@ fn invalid_deposit_bad_merkle_proof() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); let mut state = harness.get_current_state(); - let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None); + let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; @@ -270,8 +275,7 @@ fn invalid_deposit_bad_merkle_proof() { // Manually offsetting deposit count and index to trigger bad merkle proof state.eth1_data_mut().deposit_count += 1; *state.eth1_deposit_index_mut() += 1; - let result = - process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); + let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); // Expecting BadMerkleProof because the proofs were created with different indices assert_eq!( @@ -289,15 +293,14 @@ fn invalid_deposit_wrong_sig() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); let mut state = harness.get_current_state(); - let (deposits, mut state) = + let (deposits, state) = harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty())); let deposits = VariableList::from(deposits); let mut head_block = 
harness.chain.head_beacon_block().unwrap().deconstruct().0; *head_block.to_mut().body_mut().deposits_mut() = deposits; - let result = - process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); + let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); // Expecting Ok(()) even though the block signature does not correspond to the correct public key assert_eq!(result, Ok(())); } @@ -308,15 +311,14 @@ fn invalid_deposit_invalid_pub_key() { let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT); let mut state = harness.get_current_state(); - let (deposits, mut state) = + let (deposits, state) = harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None); let deposits = VariableList::from(deposits); let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; *head_block.to_mut().body_mut().deposits_mut() = deposits; - let result = - process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec); + let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); // Expecting Ok(()) even though we passed in invalid publickeybytes in the public key field of the deposit data. 
assert_eq!(result, Ok(())); diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 245876b86e..d813dc42fa 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -35,7 +35,7 @@ pub fn process_epoch( match state { BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) => altair::process_epoch(state, spec), + BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_epoch(state, spec), } } diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index 3acece267f..1011abe28f 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -47,7 +47,6 @@ pub fn process_epoch( process_slashings( state, participation_cache.current_epoch_total_active_balance(), - spec.proportional_slashing_multiplier_altair, spec, )?; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index 5906e0f8d2..b1c17851d1 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -119,7 +119,7 @@ pub fn get_inactivity_penalty_deltas( .safe_mul(state.get_inactivity_score(index)?)?; let penalty_denominator = spec .inactivity_score_bias - .safe_mul(spec.inactivity_penalty_quotient_altair)?; + .safe_mul(spec.inactivity_penalty_quotient_for_state(state))?; delta.penalize(penalty_numerator.safe_div(penalty_denominator)?)?; } deltas diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index 40eff3b404..4ae2207ff2 100644 
--- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -43,7 +43,6 @@ pub fn process_epoch( process_slashings( state, validator_statuses.total_balances.current_epoch(), - spec.proportional_slashing_multiplier, spec, )?; diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs index c2d456c494..69807a5e5f 100644 --- a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -6,14 +6,15 @@ use types::{BeaconState, ChainSpec, EthSpec, GetBalanceMut, GetValidatorMut, Uns pub fn process_slashings( state: &mut BeaconState, total_balance: u64, - slashing_multiplier: u64, spec: &ChainSpec, ) -> Result<(), Error> { let epoch = state.current_epoch(); let sum_slashings = state.get_all_slashings().iter().copied().safe_sum()?; - let adjusted_total_slashing_balance = - std::cmp::min(sum_slashings.safe_mul(slashing_multiplier)?, total_balance); + let adjusted_total_slashing_balance = std::cmp::min( + sum_slashings.safe_mul(spec.proportional_slashing_multiplier_for_state(state))?, + total_balance, + ); let (validators, mut balances) = state.validators_and_balances_mut(); for index in 0..validators.len() { diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 43eaa89c19..9018db65bc 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,4 +1,4 @@ -use crate::upgrade::upgrade_to_altair; +use crate::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; use types::*; @@ -44,11 +44,17 @@ pub fn per_slot_processing( state.slot_mut().safe_add_assign(1)?; - // If the Altair fork epoch is 
reached, perform an irregular state upgrade. - if state.slot().safe_rem(T::slots_per_epoch())? == 0 - && spec.altair_fork_epoch == Some(state.current_epoch()) - { - upgrade_to_altair(state, spec)?; + // Process fork upgrades here. Note that multiple upgrades can potentially run + // in sequence if they are scheduled in the same Epoch (common in testnets) + if state.slot().safe_rem(T::slots_per_epoch())? == 0 { + // If the Altair fork epoch is reached, perform an irregular state upgrade. + if spec.altair_fork_epoch == Some(state.current_epoch()) { + upgrade_to_altair(state, spec)?; + } + // If the Merge fork epoch is reached, perform an irregular state upgrade. + if spec.bellatrix_fork_epoch == Some(state.current_epoch()) { + upgrade_to_bellatrix(state, spec)?; + } } Ok(summary) diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index ca8e515967..fdf13c8281 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -1,3 +1,5 @@ pub mod altair; +pub mod merge; pub use altair::upgrade_to_altair; +pub use merge::upgrade_to_bellatrix; diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs new file mode 100644 index 0000000000..2e4ed441a4 --- /dev/null +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -0,0 +1,72 @@ +use std::mem; +use types::{ + BeaconState, BeaconStateError as Error, BeaconStateMerge, ChainSpec, EthSpec, + ExecutionPayloadHeader, Fork, +}; + +/// Transform an `Altair` state into a `Merge` state. +pub fn upgrade_to_bellatrix( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_altair_mut()?; + + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. 
+ // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. + let post = BeaconState::Merge(BeaconStateMerge { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: pre.fork.current_version, + current_version: spec.bellatrix_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // Execution + latest_execution_payload_header: >::default(), + // Caches + total_active_balance: pre.total_active_balance, + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + tree_hash_cache: mem::take(&mut pre.tree_hash_cache), + }); + + *pre_state = post; + 
+ Ok(()) +} diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml index e8f6b6f880..0c89fab80d 100644 --- a/consensus/tree_hash/Cargo.toml +++ b/consensus/tree_hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tree_hash" -version = "0.4.0" +version = "0.4.1" authors = ["Paul Hauner "] edition = "2018" license = "Apache-2.0" @@ -11,7 +11,7 @@ rand = "0.7.3" tree_hash_derive = "0.4.0" types = { path = "../types" } beacon_chain = { path = "../../beacon_node/beacon_chain" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" [dependencies] diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs index 7fdcfceb77..00fed489c7 100644 --- a/consensus/tree_hash/src/impls.rs +++ b/consensus/tree_hash/src/impls.rs @@ -1,5 +1,5 @@ use super::*; -use ethereum_types::{H256, U128, U256}; +use ethereum_types::{H160, H256, U128, U256}; fn int_to_hash256(int: u64) -> Hash256 { let mut bytes = [0; HASHSIZE]; @@ -126,6 +126,28 @@ impl TreeHash for U256 { } } +impl TreeHash for H160 { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Vector + } + + fn tree_hash_packed_encoding(&self) -> Vec { + let mut result = vec![0; 32]; + result[0..20].copy_from_slice(self.as_bytes()); + result + } + + fn tree_hash_packing_factor() -> usize { + 1 + } + + fn tree_hash_root(&self) -> Hash256 { + let mut result = [0; 32]; + result[0..20].copy_from_slice(self.as_bytes()); + Hash256::from_slice(&result) + } +} + impl TreeHash for H256 { fn tree_hash_type() -> TreeHashType { TreeHashType::Vector diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index eeeeb8568a..47933e7117 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -24,12 +24,12 @@ safe_arith = { path = "../safe_arith" } serde = {version = "1.0.116" , features = ["rc"] } serde_derive = "1.0.116" slog = "2.5.2" -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" -eth2_ssz_types = "0.2.1" +eth2_ssz_types = "0.2.2" 
swap_or_not_shuffle = { path = "../swap_or_not_shuffle" } test_random_derive = { path = "../../common/test_random_derive" } -tree_hash = "0.4.0" +tree_hash = "0.4.1" tree_hash_derive = "0.4.0" rand_xorshift = "0.2.0" cached_tree_hash = { path = "../cached_tree_hash" } @@ -38,13 +38,13 @@ tempfile = "3.1.0" derivative = "2.1.1" rusqlite = { version = "0.25.3", features = ["bundled"], optional = true } arbitrary = { version = "1.0", features = ["derive"], optional = true } -eth2_serde_utils = "0.1.0" +eth2_serde_utils = "0.1.1" regex = "1.3.9" lazy_static = "1.4.0" parking_lot = "0.11.1" itertools = "0.10.0" -# superstruct = "0.2.0" -superstruct = { path = "../../../superstruct" } +superstruct = "0.4.0" +serde_json = "1.0.74" milhouse = { path = "../../../milhouse", optional = true } [dev-dependencies] diff --git a/consensus/types/presets/mainnet/bellatrix.yaml b/consensus/types/presets/mainnet/bellatrix.yaml new file mode 100644 index 0000000000..7ae61b732f --- /dev/null +++ b/consensus/types/presets/mainnet/bellatrix.yaml @@ -0,0 +1,21 @@ +# Mainnet preset - Bellatrix + +# Updated penalty values +# --------------------------------------------------------------- +# 2**24 (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: 16777216 +# 2**5 (= 32) +MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: 32 +# 3 +PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: 3 + +# Execution +# --------------------------------------------------------------- +# 2**30 (= 1,073,741,824) +MAX_BYTES_PER_TRANSACTION: 1073741824 +# 2**20 (= 1,048,576) +MAX_TRANSACTIONS_PER_PAYLOAD: 1048576 +# 2**8 (= 256) +BYTES_PER_LOGS_BLOOM: 256 +# 2**5 (= 32) +MAX_EXTRA_DATA_BYTES: 32 diff --git a/consensus/types/presets/minimal/bellatrix.yaml b/consensus/types/presets/minimal/bellatrix.yaml new file mode 100644 index 0000000000..3417985fad --- /dev/null +++ b/consensus/types/presets/minimal/bellatrix.yaml @@ -0,0 +1,21 @@ +# Minimal preset - Bellatrix + +# Updated penalty values +# 
--------------------------------------------------------------- +# 2**24 (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: 16777216 +# 2**5 (= 32) +MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: 32 +# 3 +PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: 3 + +# Execution +# --------------------------------------------------------------- +# 2**30 (= 1,073,741,824) +MAX_BYTES_PER_TRANSACTION: 1073741824 +# 2**20 (= 1,048,576) +MAX_TRANSACTIONS_PER_PAYLOAD: 1048576 +# 2**8 (= 256) +BYTES_PER_LOGS_BLOOM: 256 +# 2**5 (= 32) +MAX_EXTRA_DATA_BYTES: 32 diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 66d9e78a85..1c9ec3bc4d 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -1,3 +1,4 @@ +use derivative::Derivative; use safe_arith::ArithError; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -23,7 +24,10 @@ pub enum Error { /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, +)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] pub struct Attestation { pub aggregation_bits: BitList, diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index f11b921480..0026db0ee7 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -1,9 +1,11 @@ use crate::beacon_block_body::{ - BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyRef, BeaconBlockBodyRefMut, + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, BeaconBlockBodyRef, + BeaconBlockBodyRefMut, }; use crate::test_utils::TestRandom; use crate::*; use bls::Signature; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, 
DecodeError}; use ssz_derive::{Decode, Encode}; @@ -14,19 +16,20 @@ use tree_hash_derive::TreeHash; /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair), + variants(Base, Altair, Merge), variant_attributes( derive( Debug, - PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, - TestRandom + TestRandom, + Derivative, ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), ), @@ -35,7 +38,8 @@ use tree_hash_derive::TreeHash; tree_hash(enum_behaviour = "transparent") ) )] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash)] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] #[serde(bound = "T: EthSpec")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] @@ -55,6 +59,8 @@ pub struct BeaconBlock { pub body: BeaconBlockBodyBase, #[superstruct(only(Altair), partial_getter(rename = "body_altair"))] pub body: BeaconBlockBodyAltair, + #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] + pub body: BeaconBlockBodyMerge, } impl SignedRoot for BeaconBlock {} @@ -63,7 +69,9 @@ impl<'a, T: EthSpec> SignedRoot for BeaconBlockRef<'a, T> {} impl BeaconBlock { /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { - if spec.altair_fork_epoch == Some(T::genesis_epoch()) { + if spec.bellatrix_fork_epoch == Some(T::genesis_epoch()) { + Self::Merge(BeaconBlockMerge::empty(spec)) + } else if spec.altair_fork_epoch == Some(T::genesis_epoch()) { Self::Altair(BeaconBlockAltair::empty(spec)) } else { Self::Base(BeaconBlockBase::empty(spec)) @@ -96,9 +104,13 @@ impl BeaconBlock { /// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based /// on the fork slot. 
pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { - BeaconBlockAltair::from_ssz_bytes(bytes) - .map(BeaconBlock::Altair) - .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) + BeaconBlockMerge::from_ssz_bytes(bytes) + .map(BeaconBlock::Merge) + .or_else(|_| { + BeaconBlockAltair::from_ssz_bytes(bytes) + .map(BeaconBlock::Altair) + .or_else(|_| BeaconBlockBase::from_ssz_bytes(bytes).map(BeaconBlock::Base)) + }) } /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. @@ -171,6 +183,7 @@ impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { let object_fork = match self { BeaconBlockRef::Base { .. } => ForkName::Base, BeaconBlockRef::Altair { .. } => ForkName::Altair, + BeaconBlockRef::Merge { .. } => ForkName::Merge, }; if fork_at_slot == object_fork { @@ -188,6 +201,7 @@ impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { match self { BeaconBlockRef::Base(block) => BeaconBlockBodyRef::Base(&block.body), BeaconBlockRef::Altair(block) => BeaconBlockBodyRef::Altair(&block.body), + BeaconBlockRef::Merge(block) => BeaconBlockBodyRef::Merge(&block.body), } } @@ -196,6 +210,7 @@ impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { match self { BeaconBlockRef::Base(block) => block.body.tree_hash_root(), BeaconBlockRef::Altair(block) => block.body.tree_hash_root(), + BeaconBlockRef::Merge(block) => block.body.tree_hash_root(), } } @@ -222,6 +237,12 @@ impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { ..self.block_header() } } + + /// Extracts a reference to an execution payload from a block, returning an error if the block + /// is pre-merge. 
+ pub fn execution_payload(&self) -> Result<&ExecutionPayload, Error> { + self.body().execution_payload() + } } impl<'a, T: EthSpec> BeaconBlockRefMut<'a, T> { @@ -230,6 +251,7 @@ impl<'a, T: EthSpec> BeaconBlockRefMut<'a, T> { match self { BeaconBlockRefMut::Base(block) => BeaconBlockBodyRefMut::Base(&mut block.body), BeaconBlockRefMut::Altair(block) => BeaconBlockBodyRefMut::Altair(&mut block.body), + BeaconBlockRefMut::Merge(block) => BeaconBlockBodyRefMut::Merge(&mut block.body), } } } @@ -411,6 +433,61 @@ impl BeaconBlockAltair { } } +impl BeaconBlockMerge { + /// Returns an empty Merge block to be used during genesis. + pub fn empty(spec: &ChainSpec) -> Self { + BeaconBlockMerge { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyMerge { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: ExecutionPayload::empty(), + }, + } + } + + /// Return a Merge block where the block has maximum size. 
+ pub fn full(spec: &ChainSpec) -> Self { + let altair_block = BeaconBlockAltair::full(spec); + BeaconBlockMerge { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyMerge { + proposer_slashings: altair_block.body.proposer_slashings, + attester_slashings: altair_block.body.attester_slashings, + attestations: altair_block.body.attestations, + deposits: altair_block.body.deposits, + voluntary_exits: altair_block.body.voluntary_exits, + sync_aggregate: altair_block.body.sync_aggregate, + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + execution_payload: ExecutionPayload::default(), + }, + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index ceb90fef90..c4df4f2771 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,5 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -11,24 +12,28 @@ use tree_hash_derive::TreeHash; /// /// This *superstruct* abstracts over the hard-fork. 
#[superstruct( - variants(Base, Altair), + variants(Base, Altair, Merge), variant_attributes( derive( Debug, - PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, - TestRandom + TestRandom, + Derivative, ), + derivative(PartialEq, Hash(bound = "T: EthSpec")), serde(bound = "T: EthSpec", deny_unknown_fields), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) - ) + ), + cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] #[serde(bound = "T: EthSpec")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] @@ -41,16 +46,19 @@ pub struct BeaconBlockBody { pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub sync_aggregate: SyncAggregate, + #[superstruct(only(Merge))] + pub execution_payload: ExecutionPayload, } impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { - /// Access the sync aggregate from the block's body, if one exists. - pub fn sync_aggregate(self) -> Option<&'a SyncAggregate> { + /// Get the fork_name of this object + pub fn fork_name(self) -> ForkName { match self { - BeaconBlockBodyRef::Base(_) => None, - BeaconBlockBodyRef::Altair(inner) => Some(&inner.sync_aggregate), + BeaconBlockBodyRef::Base { .. } => ForkName::Base, + BeaconBlockBodyRef::Altair { .. } => ForkName::Altair, + BeaconBlockBodyRef::Merge { .. 
} => ForkName::Merge, } } } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index f9373019b7..cebc8fd94f 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -33,7 +33,9 @@ pub use iter::BlockRootsIter; pub use milhouse::{interface::Interface, List as VList, List}; #[cfg(not(feature = "milhouse"))] -pub use {ssz_types::FixedVector, tree_hash_cache::BeaconTreeHashCache, VariableList as VList}; +pub use { + ssz_types::FixedVector, ssz_types::VariableList as VList, tree_hash_cache::BeaconTreeHashCache, +}; #[macro_use] mod committee_cache; @@ -47,6 +49,8 @@ mod tree_hash_cache; #[cfg(feature = "milhouse")] pub type ListMut<'a, T, N> = Interface<'a, T, List>; +#[cfg(not(feature = "milhouse"))] +pub type ListMut<'a, T, N> = &'a mut VList; #[cfg(feature = "milhouse")] pub type ValidatorsMut<'a, N> = ListMut<'a, Validator, N>; @@ -193,7 +197,7 @@ impl From for Hash256 { /// The state of the `BeaconChain` at some slot. 
#[superstruct( - variants(Base, Altair), + variants(Base, Altair, Merge), variant_attributes( derive( Derivative, @@ -278,9 +282,9 @@ where pub current_epoch_attestations: VList, T::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub current_epoch_participation: VariableList, // Finality @@ -295,15 +299,19 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub inactivity_scores: VariableList, // Light-client sync committees - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair))] + #[superstruct(only(Altair, Merge))] pub next_sync_committee: Arc>, + // Execution + #[superstruct(only(Merge))] + pub latest_execution_payload_header: ExecutionPayloadHeader, + // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] @@ -414,6 +422,7 @@ impl BeaconState { let object_fork = match self { BeaconState::Base { .. } => ForkName::Base, BeaconState::Altair { .. } => ForkName::Altair, + BeaconState::Merge { .. 
} => ForkName::Merge, }; if fork_at_slot == object_fork { @@ -1155,6 +1164,7 @@ impl BeaconState { match self { BeaconState::Base(state) => (&mut state.validators, &mut state.balances), BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), + BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), } #[cfg(feature = "milhouse")] @@ -1351,11 +1361,13 @@ impl BeaconState { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Merge(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == self.previous_epoch() { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Merge(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -1651,6 +1663,7 @@ impl BeaconState { let mut res = match self { BeaconState::Base(inner) => BeaconState::Base(inner.clone()), BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), + BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), }; if config.committee_caches { *res.committee_caches_mut() = self.committee_caches().clone(); @@ -1762,7 +1775,8 @@ impl CompareFields for BeaconState { match (self, other) { (BeaconState::Base(x), BeaconState::Base(y)) => x.compare_fields(y), (BeaconState::Altair(x), BeaconState::Altair(y)) => x.compare_fields(y), - _ => panic!("compare_fields: mismatched state variants"), + (BeaconState::Merge(x), BeaconState::Merge(y)) => x.compare_fields(y), + _ => panic!("compare_fields: mismatched state variants",), } } } diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index d8b6c796c0..b88b49e1a3 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -3,6 +3,7 
@@ use crate::test_utils::*; use crate::test_utils::{SeedableRng, XorShiftRng}; use beacon_chain::test_utils::{ interop_genesis_state, test_spec, BeaconChainHarness, EphemeralHarnessType, + DEFAULT_ETH1_BLOCK_HASH, }; use beacon_chain::types::{ test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, @@ -557,7 +558,14 @@ fn tree_hash_cache_linear_history_long_skip() { let spec = &test_spec::(); // This state has a cache that advances normally each slot. - let mut state: BeaconState = interop_genesis_state(&keypairs, 0, spec).unwrap(); + let mut state: BeaconState = interop_genesis_state( + &keypairs, + 0, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + spec, + ) + .unwrap(); state.update_tree_hash_cache().unwrap(); diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs index 39a8b659dd..40b2c4bde0 100644 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ b/consensus/types/src/beacon_state/tree_hash_cache.rs @@ -341,16 +341,26 @@ impl BeaconTreeHashCacheInner { )?; hasher.write(state.finalized_checkpoint().tree_hash_root().as_bytes())?; - // Inactivity & light-client sync committees - if let BeaconState::Altair(ref state) = state { + // Inactivity & light-client sync committees (Altair and later). + if let Ok(inactivity_scores) = state.inactivity_scores() { hasher.write( self.inactivity_scores - .recalculate_tree_hash_root(&state.inactivity_scores)? + .recalculate_tree_hash_root(inactivity_scores)? 
.as_bytes(), )?; + } - hasher.write(state.current_sync_committee.tree_hash_root().as_bytes())?; - hasher.write(state.next_sync_committee.tree_hash_root().as_bytes())?; + if let Ok(current_sync_committee) = state.current_sync_committee() { + hasher.write(current_sync_committee.tree_hash_root().as_bytes())?; + } + + if let Ok(next_sync_committee) = state.next_sync_committee() { + hasher.write(next_sync_committee.tree_hash_root().as_bytes())?; + } + + // Execution payload (merge and later). + if let Ok(payload_header) = state.latest_execution_payload_header() { + hasher.write(payload_header.tree_hash_root().as_bytes())?; } let root = hasher.finish()?; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ee213f1f87..f191eb8671 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -101,6 +101,7 @@ pub struct ChainSpec { * Fork choice */ pub safe_slots_to_update_justified: u64, + pub proposer_score_boost: Option, /* * Eth1 @@ -128,6 +129,19 @@ pub struct ChainSpec { /// The Altair fork epoch is optional, with `None` representing "Altair never happens". pub altair_fork_epoch: Option, + /* + * Merge hard fork params + */ + pub inactivity_penalty_quotient_bellatrix: u64, + pub min_slashing_penalty_quotient_bellatrix: u64, + pub proportional_slashing_multiplier_bellatrix: u64, + pub bellatrix_fork_version: [u8; 4], + /// The Merge fork epoch is optional, with `None` representing "Merge never happens". 
+ pub bellatrix_fork_epoch: Option, + pub terminal_total_difficulty: Uint256, + pub terminal_block_hash: Hash256, + pub terminal_block_hash_activation_epoch: Epoch, + /* * Networking */ @@ -156,7 +170,7 @@ impl ChainSpec { ) -> EnrForkId { EnrForkId { fork_digest: self.fork_digest::(slot, genesis_validators_root), - next_fork_version: self.next_fork_version(), + next_fork_version: self.next_fork_version::(slot), next_fork_epoch: self .next_fork_epoch::(slot) .map(|(_, e)| e) @@ -178,10 +192,12 @@ impl ChainSpec { /// Returns the `next_fork_version`. /// - /// Since `next_fork_version = current_fork_version` if no future fork is planned, - /// this function returns `altair_fork_version` until the next fork is planned. - pub fn next_fork_version(&self) -> [u8; 4] { - self.altair_fork_version + /// `next_fork_version = current_fork_version` if no future fork is planned, + pub fn next_fork_version(&self, slot: Slot) -> [u8; 4] { + match self.next_fork_epoch::(slot) { + Some((fork, _)) => self.fork_version_for_name(fork), + None => self.fork_version_for_name(self.fork_name_at_slot::(slot)), + } } /// Returns the epoch of the next scheduled fork along with its corresponding `ForkName`. @@ -201,9 +217,12 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. 
pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, + match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Merge, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, } } @@ -212,6 +231,7 @@ impl ChainSpec { match fork_name { ForkName::Base => self.genesis_fork_version, ForkName::Altair => self.altair_fork_version, + ForkName::Merge => self.bellatrix_fork_version, } } @@ -220,6 +240,40 @@ impl ChainSpec { match fork_name { ForkName::Base => Some(Epoch::new(0)), ForkName::Altair => self.altair_fork_epoch, + ForkName::Merge => self.bellatrix_fork_epoch, + } + } + + /// For a given `BeaconState`, return the inactivity penalty quotient associated with its variant. + pub fn inactivity_penalty_quotient_for_state(&self, state: &BeaconState) -> u64 { + match state { + BeaconState::Base(_) => self.inactivity_penalty_quotient, + BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair, + BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix, + } + } + + /// For a given `BeaconState`, return the proportional slashing multiplier associated with its variant. + pub fn proportional_slashing_multiplier_for_state( + &self, + state: &BeaconState, + ) -> u64 { + match state { + BeaconState::Base(_) => self.proportional_slashing_multiplier, + BeaconState::Altair(_) => self.proportional_slashing_multiplier_altair, + BeaconState::Merge(_) => self.proportional_slashing_multiplier_bellatrix, + } + } + + /// For a given `BeaconState`, return the minimum slashing penalty quotient associated with its variant. 
+ pub fn min_slashing_penalty_quotient_for_state( + &self, + state: &BeaconState, + ) -> u64 { + match state { + BeaconState::Base(_) => self.min_slashing_penalty_quotient, + BeaconState::Altair(_) => self.min_slashing_penalty_quotient_altair, + BeaconState::Merge(_) => self.min_slashing_penalty_quotient_bellatrix, } } @@ -355,7 +409,7 @@ impl ChainSpec { * Constants */ genesis_slot: Slot::new(0), - far_future_epoch: Epoch::new(u64::max_value()), + far_future_epoch: Epoch::new(u64::MAX), base_rewards_per_epoch: 4, deposit_contract_tree_depth: 32, @@ -436,6 +490,7 @@ impl ChainSpec { * Fork choice */ safe_slots_to_update_justified: 8, + proposer_score_boost: None, /* * Eth1 @@ -468,6 +523,26 @@ impl ChainSpec { altair_fork_version: [0x01, 0x00, 0x00, 0x00], altair_fork_epoch: Some(Epoch::new(74240)), + /* + * Merge hard fork params + */ + inactivity_penalty_quotient_bellatrix: u64::checked_pow(2, 24) + .expect("pow does not overflow"), + min_slashing_penalty_quotient_bellatrix: u64::checked_pow(2, 5) + .expect("pow does not overflow"), + proportional_slashing_multiplier_bellatrix: 3, + bellatrix_fork_version: [0x02, 0x00, 0x00, 0x00], + bellatrix_fork_epoch: None, + terminal_total_difficulty: Uint256::MAX + .checked_sub(Uint256::from(2u64.pow(10))) + .expect("subtraction does not overflow") + // Add 1 since the spec declares `2**256 - 2**10` and we use + // `Uint256::MAX` which is `2*256- 1`. 
+ .checked_add(Uint256::one()) + .expect("addition does not overflow"), + terminal_block_hash: Hash256::zero(), + terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), + /* * Network specific */ @@ -507,6 +582,9 @@ impl ChainSpec { epochs_per_sync_committee_period: Epoch::new(8), altair_fork_version: [0x01, 0x00, 0x00, 0x01], altair_fork_epoch: None, + // Merge + bellatrix_fork_version: [0x02, 0x00, 0x00, 0x01], + bellatrix_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -533,6 +611,17 @@ pub struct Config { #[serde(default)] pub preset_base: String, + // TODO(merge): remove this default + #[serde(default = "default_terminal_total_difficulty")] + #[serde(with = "eth2_serde_utils::quoted_u256")] + pub terminal_total_difficulty: Uint256, + // TODO(merge): remove this default + #[serde(default = "default_terminal_block_hash")] + pub terminal_block_hash: Hash256, + // TODO(merge): remove this default + #[serde(default = "default_terminal_block_hash_activation_epoch")] + pub terminal_block_hash_activation_epoch: Epoch, + #[serde(with = "eth2_serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] @@ -548,6 +637,16 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub altair_fork_epoch: Option>, + // TODO(merge): remove this default + #[serde(default = "default_bellatrix_fork_version")] + #[serde(with = "eth2_serde_utils::bytes_4_hex")] + bellatrix_fork_version: [u8; 4], + // TODO(merge): remove this default + #[serde(default = "default_bellatrix_fork_epoch")] + #[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub bellatrix_fork_epoch: Option>, + #[serde(with = "eth2_serde_utils::quoted_u64")] seconds_per_slot: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] @@ -570,6 +669,9 @@ pub struct Config { #[serde(with = "eth2_serde_utils::quoted_u64")] churn_limit_quotient: 
u64, + #[serde(skip_serializing_if = "Option::is_none")] + proposer_score_boost: Option>, + #[serde(with = "eth2_serde_utils::quoted_u64")] deposit_chain_id: u64, #[serde(with = "eth2_serde_utils::quoted_u64")] @@ -577,6 +679,29 @@ pub struct Config { deposit_contract_address: Address, } +fn default_bellatrix_fork_version() -> [u8; 4] { + // This value shouldn't be used. + [0xff, 0xff, 0xff, 0xff] +} + +fn default_bellatrix_fork_epoch() -> Option> { + None +} + +fn default_terminal_total_difficulty() -> Uint256 { + "115792089237316195423570985008687907853269984665640564039457584007913129638912" + .parse() + .unwrap() +} + +fn default_terminal_block_hash() -> Hash256 { + Hash256::zero() +} + +fn default_terminal_block_hash_activation_epoch() -> Epoch { + Epoch::new(u64::MAX) +} + impl Default for Config { fn default() -> Self { let chain_spec = MainnetEthSpec::default_spec(); @@ -629,6 +754,10 @@ impl Config { Self { preset_base: T::spec_name().to_string(), + terminal_total_difficulty: spec.terminal_total_difficulty, + terminal_block_hash: spec.terminal_block_hash, + terminal_block_hash_activation_epoch: spec.terminal_block_hash_activation_epoch, + min_genesis_active_validator_count: spec.min_genesis_active_validator_count, min_genesis_time: spec.min_genesis_time, genesis_fork_version: spec.genesis_fork_version, @@ -638,6 +767,10 @@ impl Config { altair_fork_epoch: spec .altair_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), + bellatrix_fork_version: spec.bellatrix_fork_version, + bellatrix_fork_epoch: spec + .bellatrix_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), seconds_per_slot: spec.seconds_per_slot, seconds_per_eth1_block: spec.seconds_per_eth1_block, @@ -651,6 +784,8 @@ impl Config { churn_limit_quotient: spec.churn_limit_quotient, min_per_epoch_churn_limit: spec.min_per_epoch_churn_limit, + proposer_score_boost: spec.proposer_score_boost.map(|value| MaybeQuoted { value }), + deposit_chain_id: spec.deposit_chain_id, deposit_network_id: 
spec.deposit_network_id, deposit_contract_address: spec.deposit_contract_address, @@ -668,12 +803,17 @@ impl Config { // Pattern match here to avoid missing any fields. let &Config { ref preset_base, + terminal_total_difficulty, + terminal_block_hash, + terminal_block_hash_activation_epoch, min_genesis_active_validator_count, min_genesis_time, genesis_fork_version, genesis_delay, altair_fork_version, altair_fork_epoch, + bellatrix_fork_epoch, + bellatrix_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -684,6 +824,7 @@ impl Config { ejection_balance, min_per_epoch_churn_limit, churn_limit_quotient, + proposer_score_boost, deposit_chain_id, deposit_network_id, deposit_contract_address, @@ -700,6 +841,8 @@ impl Config { genesis_delay, altair_fork_version, altair_fork_epoch: altair_fork_epoch.map(|q| q.value), + bellatrix_fork_epoch: bellatrix_fork_epoch.map(|q| q.value), + bellatrix_fork_version, seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -710,9 +853,13 @@ impl Config { ejection_balance, min_per_epoch_churn_limit, churn_limit_quotient, + proposer_score_boost: proposer_score_boost.map(|q| q.value), deposit_chain_id, deposit_network_id, deposit_contract_address, + terminal_total_difficulty, + terminal_block_hash, + terminal_block_hash_activation_epoch, ..chain_spec.clone() }) } diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index 16d36c850c..affda1a061 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,5 +1,6 @@ -use crate::{AltairPreset, BasePreset, ChainSpec, Config, EthSpec}; +use crate::{AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec}; use serde_derive::{Deserialize, Serialize}; +use serde_json::Value; use std::collections::HashMap; /// Fusion of a runtime-config with the compile-time preset values. 
@@ -14,10 +15,12 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, - + // TODO(merge): re-enable + // #[serde(flatten)] + // pub bellatrix_preset: BellatrixPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. #[serde(flatten)] - pub extra_fields: HashMap, + pub extra_fields: HashMap, } impl ConfigAndPreset { @@ -25,6 +28,8 @@ impl ConfigAndPreset { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); let altair_preset = AltairPreset::from_chain_spec::(spec); + // TODO(merge): re-enable + let _bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let extra_fields = HashMap::new(); Self { @@ -79,7 +84,7 @@ impl ConfigAndPreset { ), ]; for (key, value) in fields { - self.extra_fields.insert(key.to_uppercase(), value); + self.extra_fields.insert(key.to_uppercase(), value.into()); } } } @@ -103,8 +108,13 @@ mod test { let mut yamlconfig = ConfigAndPreset::from_chain_spec::(&mainnet_spec); let (k1, v1) = ("SAMPLE_HARDFORK_KEY1", "123456789"); let (k2, v2) = ("SAMPLE_HARDFORK_KEY2", "987654321"); + let (k3, v3) = ("SAMPLE_HARDFORK_KEY3", 32); + let (k4, v4) = ("SAMPLE_HARDFORK_KEY4", Value::Null); yamlconfig.extra_fields.insert(k1.into(), v1.into()); yamlconfig.extra_fields.insert(k2.into(), v2.into()); + yamlconfig.extra_fields.insert(k3.into(), v3.into()); + yamlconfig.extra_fields.insert(k4.into(), v4); + serde_yaml::to_writer(writer, &yamlconfig).expect("failed to write or serialize"); let reader = OpenOptions::new() diff --git a/consensus/types/src/consts.rs b/consensus/types/src/consts.rs index 04e8e60ee5..a9377bc3e0 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/consts.rs @@ -19,3 +19,6 @@ pub mod altair { pub const NUM_FLAG_INDICES: usize = 3; } +pub mod merge { + pub const INTERVALS_PER_SLOT: u64 = 3; +} diff --git a/consensus/types/src/deposit.rs 
b/consensus/types/src/deposit.rs index 4b201360ab..a347cf675c 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -12,7 +12,9 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct Deposit { pub proof: FixedVector, pub data: DepositData, diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index d984f168f1..6c5444e110 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -11,7 +11,9 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct DepositData { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 6e21edf9f6..ae0cafe1ff 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -3,12 +3,15 @@ use crate::*; use safe_arith::SafeArith; use serde_derive::{Deserialize, Serialize}; use ssz_types::typenum::{ - Unsigned, U0, U1024, U1099511627776, U128, U16, U16777216, U2, U2048, U32, U4, U4096, U512, - U64, U65536, U8, U8192, + Unsigned, U0, U1024, U1073741824, U1099511627776, U128, U16, U16777216, U2, U2048, U32, U4, + U4096, U512, U64, U65536, U8, U8192, }; use std::fmt::{self, Debug}; use std::str::FromStr; +use ssz_types::typenum::{bit::B0, UInt, U1048576, U256, U625}; +pub type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 + const MAINNET: &str = "mainnet"; 
const MINIMAL: &str = "minimal"; @@ -80,6 +83,15 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type SyncCommitteeSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; /// The number of `sync_committee` subnets. type SyncCommitteeSubnetCount: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Merge + */ + type MaxBytesPerTransaction: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxTransactionsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type BytesPerLogsBloom: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type GasLimitDenominator: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MinGasLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxExtraDataBytes: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -187,6 +199,26 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn sync_subcommittee_size() -> usize { Self::SyncSubcommitteeSize::to_usize() } + + /// Returns the `MAX_BYTES_PER_TRANSACTION` constant for this specification. + fn max_bytes_per_transaction() -> usize { + Self::MaxBytesPerTransaction::to_usize() + } + + /// Returns the `MAX_TRANSACTIONS_PER_PAYLOAD` constant for this specification. + fn max_transactions_per_payload() -> usize { + Self::MaxTransactionsPerPayload::to_usize() + } + + /// Returns the `MAX_EXTRA_DATA_BYTES` constant for this specification. + fn max_extra_data_bytes() -> usize { + Self::MaxExtraDataBytes::to_usize() + } + + /// Returns the `BYTES_PER_LOGS_BLOOM` constant for this specification. + fn bytes_per_logs_bloom() -> usize { + Self::BytesPerLogsBloom::to_usize() + } } /// Macro to inherit some type values from another EthSpec. 
@@ -221,6 +253,12 @@ impl EthSpec for MainnetEthSpec { type MaxVoluntaryExits = U16; type SyncCommitteeSize = U512; type SyncCommitteeSubnetCount = U4; + type MaxBytesPerTransaction = U1073741824; // 1,073,741,824 + type MaxTransactionsPerPayload = U1048576; // 1,048,576 + type BytesPerLogsBloom = U256; + type GasLimitDenominator = U1024; + type MinGasLimit = U5000; + type MaxExtraDataBytes = U32; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -262,7 +300,13 @@ impl EthSpec for MinimalEthSpec { MaxAttesterSlashings, MaxAttestations, MaxDeposits, - MaxVoluntaryExits + MaxVoluntaryExits, + MaxBytesPerTransaction, + MaxTransactionsPerPayload, + BytesPerLogsBloom, + GasLimitDenominator, + MinGasLimit, + MaxExtraDataBytes }); fn default_spec() -> ChainSpec { diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs new file mode 100644 index 0000000000..2fb253f12c --- /dev/null +++ b/consensus/types/src/execution_payload.rs @@ -0,0 +1,58 @@ +use crate::{test_utils::TestRandom, *}; +use derivative::Derivative; +use serde_derive::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +pub type Transaction = VariableList; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive( + Default, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, +)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +pub struct ExecutionPayload { + pub parent_hash: Hash256, + pub fee_recipient: Address, + pub state_root: Hash256, + pub receipt_root: Hash256, + #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] + pub logs_bloom: FixedVector, + pub random: Hash256, + 
#[serde(with = "eth2_serde_utils::quoted_u64")] + pub block_number: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_limit: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_used: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub timestamp: u64, + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub extra_data: VariableList, + #[serde(with = "eth2_serde_utils::quoted_u256")] + pub base_fee_per_gas: Uint256, + pub block_hash: Hash256, + #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] + pub transactions: + VariableList, T::MaxTransactionsPerPayload>, +} + +impl ExecutionPayload { + pub fn empty() -> Self { + Self::default() + } + + #[allow(clippy::integer_arithmetic)] + /// Returns the maximum size of an execution payload. + pub fn max_execution_payload_size() -> usize { + // Fixed part + Self::empty().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (T::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + } +} diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs new file mode 100644 index 0000000000..6cb76a6465 --- /dev/null +++ b/consensus/types/src/execution_payload_header.rs @@ -0,0 +1,39 @@ +use crate::{test_utils::TestRandom, *}; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive( + Default, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] +pub struct ExecutionPayloadHeader { + pub parent_hash: Hash256, + pub fee_recipient: Address, + pub state_root: Hash256, + pub receipt_root: Hash256, + #[serde(with = 
"ssz_types::serde_utils::hex_fixed_vec")] + pub logs_bloom: FixedVector, + pub random: Hash256, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub block_number: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_limit: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_used: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub timestamp: u64, + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub extra_data: VariableList, + #[serde(with = "eth2_serde_utils::quoted_u256")] + pub base_fee_per_gas: Uint256, + pub block_hash: Hash256, + pub transactions_root: Hash256, +} + +impl ExecutionPayloadHeader { + pub fn empty() -> Self { + Self::default() + } +} diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 1d488f7696..52b9294c8c 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -35,6 +35,18 @@ impl ForkContext { )); } + // Only add Merge to list of forks if it's enabled + // Note: `bellatrix_fork_epoch == None` implies merge hasn't been activated yet on the config. 
+ if spec.bellatrix_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Merge, + ChainSpec::compute_fork_digest( + spec.bellatrix_fork_version, + genesis_validators_root, + ), + )); + } + let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); let digest_to_fork = fork_to_digest diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 85ba35e395..4a2e762087 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -10,11 +10,12 @@ use std::str::FromStr; pub enum ForkName { Base, Altair, + Merge, } impl ForkName { pub fn list_all() -> Vec { - vec![ForkName::Base, ForkName::Altair] + vec![ForkName::Base, ForkName::Altair, ForkName::Merge] } /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` @@ -24,10 +25,17 @@ impl ForkName { match self { ForkName::Base => { spec.altair_fork_epoch = None; + spec.bellatrix_fork_epoch = None; spec } ForkName::Altair => { spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = None; + spec + } + ForkName::Merge => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); spec } } @@ -40,6 +48,7 @@ impl ForkName { match self { ForkName::Base => None, ForkName::Altair => Some(ForkName::Base), + ForkName::Merge => Some(ForkName::Altair), } } @@ -49,7 +58,8 @@ impl ForkName { pub fn next_fork(self) -> Option { match self { ForkName::Base => Some(ForkName::Altair), - ForkName::Altair => None, + ForkName::Altair => Some(ForkName::Merge), + ForkName::Merge => None, } } } @@ -87,6 +97,10 @@ macro_rules! 
map_fork_name_with { let (value, extra_data) = $body; ($t::Altair(value), extra_data) } + ForkName::Merge => { + let (value, extra_data) = $body; + ($t::Merge(value), extra_data) + } } }; } @@ -98,6 +112,7 @@ impl FromStr for ForkName { Ok(match fork_name.to_lowercase().as_ref() { "phase0" | "base" => ForkName::Base, "altair" => ForkName::Altair, + "bellatrix" | "merge" => ForkName::Merge, _ => return Err(()), }) } @@ -108,6 +123,7 @@ impl Display for ForkName { match self { ForkName::Base => "phase0".fmt(f), ForkName::Altair => "altair".fmt(f), + ForkName::Merge => "bellatrix".fmt(f), } } } @@ -139,7 +155,7 @@ mod test { #[test] fn previous_and_next_fork_consistent() { - assert_eq!(ForkName::Altair.next_fork(), None); + assert_eq!(ForkName::Merge.next_fork(), None); assert_eq!(ForkName::Base.previous_fork(), None); for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() { @@ -165,4 +181,11 @@ mod test { assert_eq!(ForkName::from_str("NO_NAME"), Err(())); assert_eq!(ForkName::from_str("no_name"), Err(())); } + + #[test] + fn fork_name_bellatrix_or_merge() { + assert_eq!(ForkName::from_str("bellatrix"), Ok(ForkName::Merge)); + assert_eq!(ForkName::from_str("merge"), Ok(ForkName::Merge)); + assert_eq!(ForkName::Merge.to_string(), "bellatrix"); + } } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index cecd6c2018..f5f74b601b 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -12,7 +12,7 @@ use tree_hash::TreeHash; pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. 
-#[derive(Default, Debug, PartialEq, Clone, Copy, Serialize, Deserialize)] +#[derive(Default, Debug, PartialEq, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(transparent)] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]); diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 86a8aca98a..4a52f28b86 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -37,6 +37,8 @@ pub mod deposit_message; pub mod enr_fork_id; pub mod eth1_data; pub mod eth_spec; +pub mod execution_payload; +pub mod execution_payload_header; pub mod fork; pub mod fork_data; pub mod fork_name; @@ -91,11 +93,12 @@ pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockRef, BeaconBlockRefMut, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BeaconBlockRef, + BeaconBlockRefMut, }; pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, + BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyMerge, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; @@ -110,6 +113,8 @@ pub use crate::deposit_message::DepositMessage; pub use crate::enr_fork_id::EnrForkId; pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; +pub use crate::execution_payload::{ExecutionPayload, Transaction}; +pub use crate::execution_payload_header::ExecutionPayloadHeader; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; @@ -122,7 +127,7 @@ pub use crate::mixin::{GetBalanceMut, 
GetValidatorMut}; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; pub use crate::pending_attestation::PendingAttestation; -pub use crate::preset::{AltairPreset, BasePreset}; +pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::selection_proof::SelectionProof; @@ -130,6 +135,7 @@ pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, + SignedBeaconBlockMerge, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; @@ -152,6 +158,7 @@ pub use crate::voluntary_exit::VoluntaryExit; pub type CommitteeIndex = u64; pub type Hash256 = H256; +pub type Uint256 = ethereum_types::U256; pub type Address = H160; pub type ForkVersion = [u8; 4]; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 030c123405..ccda1a06a0 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -150,6 +150,40 @@ impl AltairPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct BellatrixPreset { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub inactivity_penalty_quotient_bellatrix: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub min_slashing_penalty_quotient_bellatrix: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub proportional_slashing_multiplier_bellatrix: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_bytes_per_transaction: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_transactions_per_payload: u64, + #[serde(with = 
"eth2_serde_utils::quoted_u64")] + pub bytes_per_logs_bloom: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub max_extra_data_bytes: u64, +} + +impl BellatrixPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + inactivity_penalty_quotient_bellatrix: spec.inactivity_penalty_quotient_bellatrix, + min_slashing_penalty_quotient_bellatrix: spec.min_slashing_penalty_quotient_bellatrix, + proportional_slashing_multiplier_bellatrix: spec + .proportional_slashing_multiplier_bellatrix, + max_bytes_per_transaction: T::max_bytes_per_transaction() as u64, + max_transactions_per_payload: T::max_transactions_per_payload() as u64, + bytes_per_logs_bloom: T::bytes_per_logs_bloom() as u64, + max_extra_data_bytes: T::max_extra_data_bytes() as u64, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -182,6 +216,9 @@ mod test { let altair: AltairPreset = preset_from_file(&preset_name, "altair.yaml"); assert_eq!(altair, AltairPreset::from_chain_spec::(&spec)); + + let bellatrix: BellatrixPreset = preset_from_file(&preset_name, "bellatrix.yaml"); + assert_eq!(bellatrix, BellatrixPreset::from_chain_spec::(&spec)); } #[test] diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index a9d6f1d98b..8d7df0cb02 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -1,5 +1,6 @@ use crate::*; use bls::Signature; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::fmt; @@ -37,23 +38,25 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. 
#[superstruct( - variants(Base, Altair), + variants(Base, Altair, Merge), variant_attributes( derive( Debug, - PartialEq, Clone, Serialize, Deserialize, Encode, Decode, - TreeHash + TreeHash, + Derivative, ), + derivative(PartialEq, Hash(bound = "E: EthSpec")), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), serde(bound = "E: EthSpec") ) )] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash)] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] +#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] #[serde(bound = "E: EthSpec")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] @@ -64,6 +67,8 @@ pub struct SignedBeaconBlock { pub message: BeaconBlockBase, #[superstruct(only(Altair), partial_getter(rename = "message_altair"))] pub message: BeaconBlockAltair, + #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] + pub message: BeaconBlockMerge, pub signature: Signature, } @@ -116,6 +121,9 @@ impl SignedBeaconBlock { BeaconBlock::Altair(message) => { SignedBeaconBlock::Altair(SignedBeaconBlockAltair { message, signature }) } + BeaconBlock::Merge(message) => { + SignedBeaconBlock::Merge(SignedBeaconBlockMerge { message, signature }) + } } } @@ -129,6 +137,7 @@ impl SignedBeaconBlock { SignedBeaconBlock::Altair(block) => { (BeaconBlock::Altair(block.message), block.signature) } + SignedBeaconBlock::Merge(block) => (BeaconBlock::Merge(block.message), block.signature), } } @@ -137,6 +146,7 @@ impl SignedBeaconBlock { match self { SignedBeaconBlock::Base(inner) => BeaconBlockRef::Base(&inner.message), SignedBeaconBlock::Altair(inner) => BeaconBlockRef::Altair(&inner.message), + SignedBeaconBlock::Merge(inner) => BeaconBlockRef::Merge(&inner.message), } } @@ -145,6 +155,7 @@ impl SignedBeaconBlock { match self { SignedBeaconBlock::Base(inner) => BeaconBlockRefMut::Base(&mut inner.message), SignedBeaconBlock::Altair(inner) => 
BeaconBlockRefMut::Altair(&mut inner.message), + SignedBeaconBlock::Merge(inner) => BeaconBlockRefMut::Merge(&mut inner.message), } } diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/signed_beacon_block_header.rs index df7888ec25..dc786beb6e 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/signed_beacon_block_header.rs @@ -2,11 +2,8 @@ use crate::{ test_utils::TestRandom, BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, Signature, SignedRoot, }; -use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; -use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::hash::{Hash, Hasher}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -15,26 +12,13 @@ use tree_hash_derive::TreeHash; /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Derivative, Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] -#[derivative(PartialEq, Eq)] pub struct SignedBeaconBlockHeader { pub message: BeaconBlockHeader, pub signature: Signature, } -/// Implementation of non-crypto-secure `Hash`, for use with `HashMap` and `HashSet`. -/// -/// Guarantees `header1 == header2 -> hash(header1) == hash(header2)`. -/// -/// Used in the slasher. -impl Hash for SignedBeaconBlockHeader { - fn hash(&self, state: &mut H) { - self.message.hash(state); - self.signature.as_ssz_bytes().hash(state); - } -} - impl SignedBeaconBlockHeader { /// Verify that this block header was signed by `pubkey`. 
pub fn verify_signature( diff --git a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/signed_voluntary_exit.rs index 49a9b53455..69f0e6e2c9 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/signed_voluntary_exit.rs @@ -10,7 +10,9 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct SignedVoluntaryExit { pub message: VoluntaryExit, pub signature: Signature, diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_aggregate.rs index 781c67374e..2292b02111 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_aggregate.rs @@ -1,6 +1,7 @@ use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::test_utils::TestRandom; use crate::{AggregateSignature, BitVector, EthSpec, SyncCommitteeContribution}; +use derivative::Derivative; use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -20,7 +21,10 @@ impl From for Error { } #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, +)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(bound = "T: EthSpec")] pub struct SyncAggregate { pub sync_committee_bits: BitVector, diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index bafbdca5f4..064b57f428 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -14,6 +14,7 @@ mod 
public_key_bytes; mod secret_key; mod signature; mod signature_bytes; +mod uint256; pub fn test_random_instance() -> T { let mut rng = XorShiftRng::from_seed([0x42; 16]); diff --git a/consensus/types/src/test_utils/test_random/uint256.rs b/consensus/types/src/test_utils/test_random/uint256.rs new file mode 100644 index 0000000000..a74cc6b3d8 --- /dev/null +++ b/consensus/types/src/test_utils/test_random/uint256.rs @@ -0,0 +1,10 @@ +use super::*; +use crate::Uint256; + +impl TestRandom for Uint256 { + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut key_bytes = [0; 32]; + rng.fill_bytes(&mut key_bytes); + Self::from_little_endian(&key_bytes[..]) + } +} diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 66d2f00947..cc10632d07 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -12,7 +12,9 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.12.1 #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. 
pub epoch: Epoch, diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index ef26fd1f91..9600da6df3 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -5,13 +5,13 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -eth2_ssz = "0.4.0" -tree_hash = "0.4.0" +eth2_ssz = "0.4.1" +tree_hash = "0.4.1" milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.4.2", optional = true } rand = "0.7.3" serde = "1.0.116" serde_derive = "1.0.116" -eth2_serde_utils = "0.1.0" +eth2_serde_utils = "0.1.1" hex = "0.4.2" eth2_hashing = "0.2.0" ethereum-types = "0.12.1" diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index 2001de042b..fdb59626fb 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -9,6 +9,7 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use ssz::{Decode, Encode}; use std::fmt; +use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use tree_hash::TreeHash; @@ -264,6 +265,18 @@ where impl_tree_hash!(SIGNATURE_BYTES_LEN); } +/// Hashes the `self.serialize()` bytes. 
+#[allow(clippy::derive_hash_xor_eq)] +impl Hash for GenericAggregateSignature +where + Sig: TSignature, + AggSig: TAggregateSignature, +{ + fn hash(&self, state: &mut H) { + self.serialize().hash(state); + } +} + impl fmt::Display for GenericAggregateSignature where Sig: TSignature, diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index f3aeeb5598..10ef75fc68 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -7,6 +7,7 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use ssz::{Decode, Encode}; use std::fmt; +use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use tree_hash::TreeHash; @@ -145,6 +146,13 @@ impl> TreeHash for GenericSignature> Hash for GenericSignature { + fn hash(&self, state: &mut H) { + self.serialize().hash(state); + } +} + impl> fmt::Display for GenericSignature { impl_display!(); } diff --git a/crypto/bls/src/generic_signature_bytes.rs b/crypto/bls/src/generic_signature_bytes.rs index b5c0284971..aa33c90d0c 100644 --- a/crypto/bls/src/generic_signature_bytes.rs +++ b/crypto/bls/src/generic_signature_bytes.rs @@ -9,6 +9,7 @@ use serde::ser::{Serialize, Serializer}; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; +use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use tree_hash::TreeHash; @@ -84,6 +85,12 @@ impl PartialEq for GenericSignatureBytes { } } +impl Hash for GenericSignatureBytes { + fn hash(&self, hasher: &mut H) { + self.bytes.hash(hasher); + } +} + /// Serializes the `GenericSignature` in compressed form, storing the bytes in the newly created `Self`. 
impl From> for GenericSignatureBytes where diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index 35582df380..f2d8b79b98 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -113,6 +113,14 @@ impl PartialEq for Signature { } } +impl Eq for Signature {} + +impl std::hash::Hash for Signature { + fn hash(&self, hasher: &mut H) { + self.0.hash(hasher); + } +} + #[derive(Clone)] pub struct AggregateSignature([u8; SIGNATURE_BYTES_LEN]); diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 245247ba43..a6062e5b8c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.0.1" +version = "2.1.0" authors = ["Paul Hauner "] edition = "2018" @@ -19,12 +19,13 @@ serde_json = "1.0.66" env_logger = "0.9.0" types = { path = "../consensus/types" } state_processing = { path = "../consensus/state_processing" } -eth2_ssz = "0.4.0" +int_to_bytes = { path = "../consensus/int_to_bytes" } +eth2_ssz = "0.4.1" environment = { path = "../lighthouse/environment" } eth2_network_config = { path = "../common/eth2_network_config" } genesis = { path = "../beacon_node/genesis" } deposit_contract = { path = "../common/deposit_contract" } -tree_hash = "0.4.0" +tree_hash = "0.4.1" clap_utils = { path = "../common/clap_utils" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] } diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 47ce737c9f..5a4177ead9 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,13 +1,13 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.53.0 AS builder +FROM rust:1.56.1-bullseye AS 
builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake COPY . lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE RUN cd lighthouse && make install-lcli -FROM debian:buster-slim +FROM ubuntu:latest RUN apt-get update && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli diff --git a/lcli/src/create_payload_header.rs b/lcli/src/create_payload_header.rs new file mode 100644 index 0000000000..814a57f264 --- /dev/null +++ b/lcli/src/create_payload_header.rs @@ -0,0 +1,34 @@ +use clap::ArgMatches; +use clap_utils::{parse_optional, parse_required}; +use ssz::Encode; +use std::fs::File; +use std::io::Write; +use std::time::{SystemTime, UNIX_EPOCH}; +use types::{EthSpec, ExecutionPayloadHeader}; + +pub fn run(matches: &ArgMatches) -> Result<(), String> { + let eth1_block_hash = parse_required(matches, "execution-block-hash")?; + let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to get time: {:?}", e))? 
+ .as_secs(), + ); + let base_fee_per_gas = parse_required(matches, "base-fee-per-gas")?; + let gas_limit = parse_required(matches, "gas-limit")?; + let file_name = matches.value_of("file").ok_or("No file supplied")?; + + let execution_payload_header: ExecutionPayloadHeader = ExecutionPayloadHeader { + gas_limit, + base_fee_per_gas, + timestamp: genesis_time, + block_hash: eth1_block_hash, + random: eth1_block_hash, + ..ExecutionPayloadHeader::default() + }; + let mut file = File::create(file_name).map_err(|_| "Unable to create file".to_string())?; + let bytes = execution_payload_header.as_ssz_bytes(); + file.write_all(bytes.as_slice()) + .map_err(|_| "Unable to write to file".to_string())?; + Ok(()) +} diff --git a/lcli/src/etl/block_efficiency.rs b/lcli/src/etl/block_efficiency.rs index 45452735dc..87175ace89 100644 --- a/lcli/src/etl/block_efficiency.rs +++ b/lcli/src/etl/block_efficiency.rs @@ -274,6 +274,9 @@ pub async fn run(matches: &ArgMatches<'_>) -> Result<(), String> { // Add them to the set. included_attestations_set.extend(attestations_in_block.clone()); + // Remove expired available attestations. + available_attestations_set.retain(|x| x.slot >= (slot.as_u64().saturating_sub(32))); + // Don't write data from the initialization epoch. if epoch != initialization_epoch { let included = attestations_in_block.len(); @@ -309,9 +312,6 @@ pub async fn run(matches: &ArgMatches<'_>) -> Result<(), String> { } } } - - // Remove expired available attestations. 
- available_attestations_set.retain(|x| x.slot >= (slot.as_u64().saturating_sub(32))); } let mut offline = "None".to_string(); diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index cb65bb4380..6f39392d12 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -39,7 +39,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { next_fork_version: genesis_fork_version, next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH }; - let enr = build_enr::(&enr_key, &config, enr_fork_id) + let enr = build_enr::(&enr_key, &config, &enr_fork_id) .map_err(|e| format!("Unable to create ENR: {:?}", e))?; fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs index 6f35699fca..57a5ba0098 100644 --- a/lcli/src/interop_genesis.rs +++ b/lcli/src/interop_genesis.rs @@ -1,11 +1,11 @@ use clap::ArgMatches; use clap_utils::parse_ssz_optional; use eth2_network_config::Eth2NetworkConfig; -use genesis::interop_genesis_state; +use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use ssz::Encode; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; -use types::{test_utils::generate_deterministic_keypairs, EthSpec}; +use types::{test_utils::generate_deterministic_keypairs, EthSpec, Hash256}; pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { let validator_count = matches @@ -34,7 +34,13 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), } let keypairs = generate_deterministic_keypairs(validator_count); - let genesis_state = interop_genesis_state::(&keypairs, genesis_time, &spec)?; + let genesis_state = interop_genesis_state::( + &keypairs, + genesis_time, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + &spec, + )?; eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); 
eth2_network_config.force_write_to_file(testnet_dir)?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index e6ebc03e16..a494cd3822 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -2,6 +2,7 @@ extern crate log; mod change_genesis_time; mod check_deposit_data; +mod create_payload_header; mod deploy_deposit_contract; mod eth1_genesis; mod etl; @@ -16,7 +17,7 @@ mod transition_blocks; use clap::{App, Arg, ArgMatches, SubCommand}; use clap_utils::parse_path_with_default_in_home_dir; -use environment::EnvironmentBuilder; +use environment::{EnvironmentBuilder, LoggerConfig}; use parse_ssz::run_parse_ssz; use std::path::PathBuf; use std::process; @@ -271,6 +272,57 @@ fn main() { .help("The mnemonic for key derivation."), ), ) + .subcommand( + SubCommand::with_name("create-payload-header") + .about("Generates an SSZ file containing bytes for an `ExecutionPayloadHeader`. \ + Useful as input for `lcli new-testnet --execution-payload-header FILE`. ") + .arg( + Arg::with_name("execution-block-hash") + .long("execution-block-hash") + .value_name("BLOCK_HASH") + .takes_value(true) + .help("The block hash used when generating an execution payload. 
This \ + value is used for `execution_payload_header.block_hash` as well as \ + `execution_payload_header.random`") + .required(true) + .default_value( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ) + .arg( + Arg::with_name("genesis-time") + .long("genesis-time") + .value_name("INTEGER") + .takes_value(true) + .help("The genesis time when generating an execution payload.") + ) + .arg( + Arg::with_name("base-fee-per-gas") + .long("base-fee-per-gas") + .value_name("INTEGER") + .takes_value(true) + .help("The base fee per gas field in the execution payload generated.") + .required(true) + .default_value("1000000000"), + ) + .arg( + Arg::with_name("gas-limit") + .long("gas-limit") + .value_name("INTEGER") + .takes_value(true) + .help("The gas limit field in the execution payload generated.") + .required(true) + .default_value("30000000"), + ) + .arg( + Arg::with_name("file") + .long("file") + .value_name("FILE") + .takes_value(true) + .required(true) + .help("Output file"), + ) + ) .subcommand( SubCommand::with_name("new-testnet") .about( @@ -284,6 +336,14 @@ fn main() { .takes_value(false) .help("Overwrites any previous testnet configurations"), ) + .arg( + Arg::with_name("interop-genesis-state") + .long("interop-genesis-state") + .takes_value(false) + .help( + "If present, a interop-style genesis.ssz file will be generated.", + ), + ) .arg( Arg::with_name("min-genesis-time") .long("min-genesis-time") @@ -402,6 +462,45 @@ fn main() { "The epoch at which to enable the Altair hard fork", ), ) + .arg( + Arg::with_name("merge-fork-epoch") + .long("merge-fork-epoch") + .value_name("EPOCH") + .takes_value(true) + .help( + "The epoch at which to enable the Merge hard fork", + ), + ) + .arg( + Arg::with_name("eth1-block-hash") + .long("eth1-block-hash") + .value_name("BLOCK_HASH") + .takes_value(true) + .help("The eth1 block hash used when generating a genesis state."), + ) + .arg( + Arg::with_name("execution-payload-header") + 
.long("execution-payload-header") + .value_name("FILE") + .takes_value(true) + .required(false) + .help("Path to file containing `ExecutionPayloadHeader` SSZ bytes to be \ + used in the genesis state."), + ) + .arg( + Arg::with_name("validator-count") + .long("validator-count") + .value_name("INTEGER") + .takes_value(true) + .help("The number of validators when generating a genesis state."), + ) + .arg( + Arg::with_name("genesis-time") + .long("genesis-time") + .value_name("INTEGER") + .takes_value(true) + .help("The genesis time when generating a genesis state."), + ) ) .subcommand( SubCommand::with_name("check-deposit-data") @@ -584,8 +683,16 @@ fn run( let env = env_builder .multi_threaded_tokio_runtime() .map_err(|e| format!("should start tokio runtime: {:?}", e))? - .async_logger("trace", None) - .map_err(|e| format!("should start null logger: {:?}", e))? + .initialize_logger(LoggerConfig { + path: None, + debug_level: "trace", + logfile_debug_level: "trace", + log_format: None, + max_log_size: 0, + max_log_number: 0, + compression: false, + }) + .map_err(|e| format!("should start logger: {:?}", e))? 
.build() .map_err(|e| format!("should build env: {:?}", e))?; @@ -615,6 +722,8 @@ fn run( change_genesis_time::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run change-genesis-time command: {}", e)) } + ("create-payload-header", Some(matches)) => create_payload_header::run::(matches) + .map_err(|e| format!("Failed to run create-payload-header command: {}", e)), ("replace-state-pubkeys", Some(matches)) => { replace_state_pubkeys::run::(testnet_dir, matches) .map_err(|e| format!("Failed to run replace-state-pubkeys command: {}", e)) diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index e37145bf0d..83dcc2e719 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,8 +1,16 @@ use clap::ArgMatches; use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; use eth2_network_config::Eth2NetworkConfig; +use genesis::interop_genesis_state; +use ssz::Decode; +use ssz::Encode; +use std::fs::File; +use std::io::Read; use std::path::PathBuf; -use types::{Address, Config, EthSpec}; +use std::time::{SystemTime, UNIX_EPOCH}; +use types::{ + test_utils::generate_deterministic_keypairs, Address, Config, EthSpec, ExecutionPayloadHeader, +}; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; @@ -54,10 +62,66 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul spec.altair_fork_epoch = Some(fork_epoch); } + if let Some(fork_epoch) = parse_optional(matches, "merge-fork-epoch")? { + spec.bellatrix_fork_epoch = Some(fork_epoch); + } + + let genesis_state_bytes = if matches.is_present("interop-genesis-state") { + let execution_payload_header: Option> = + parse_optional(matches, "execution-payload-header")? 
+ .map(|filename: String| { + let mut bytes = vec![]; + let mut file = File::open(filename.as_str()) + .map_err(|e| format!("Unable to open {}: {}", filename, e))?; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {}: {}", filename, e))?; + ExecutionPayloadHeader::::from_ssz_bytes(bytes.as_slice()) + .map_err(|e| format!("SSZ decode failed: {:?}", e)) + }) + .transpose()?; + + let (eth1_block_hash, genesis_time) = if let Some(payload) = + execution_payload_header.as_ref() + { + let eth1_block_hash = + parse_optional(matches, "eth1-block-hash")?.unwrap_or(payload.block_hash); + let genesis_time = + parse_optional(matches, "genesis-time")?.unwrap_or(payload.timestamp); + (eth1_block_hash, genesis_time) + } else { + let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { + "One of `--execution-payload-header` or `--eth1-block-hash` must be set".to_string() + })?; + let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to get time: {:?}", e))? 
+ .as_secs(), + ); + (eth1_block_hash, genesis_time) + }; + + let validator_count = parse_required(matches, "validator-count")?; + + let keypairs = generate_deterministic_keypairs(validator_count); + + let genesis_state = interop_genesis_state::( + &keypairs, + genesis_time, + eth1_block_hash, + execution_payload_header, + &spec, + )?; + + Some(genesis_state.as_ssz_bytes()) + } else { + None + }; + let testnet = Eth2NetworkConfig { deposit_contract_deploy_block, boot_enr: Some(vec![]), - genesis_state_bytes: None, + genesis_state_bytes, config: Config::from_chain_spec::(&spec), }; diff --git a/lcli/src/replace_state_pubkeys.rs b/lcli/src/replace_state_pubkeys.rs index 8e85f76aed..e9e3388c06 100644 --- a/lcli/src/replace_state_pubkeys.rs +++ b/lcli/src/replace_state_pubkeys.rs @@ -4,10 +4,12 @@ use eth2_network_config::Eth2NetworkConfig; use eth2_wallet::bip39::Seed; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; use ssz::Encode; +use state_processing::common::DepositDataTree; use std::fs::File; use std::io::{Read, Write}; use std::path::PathBuf; -use types::{BeaconState, EthSpec}; +use tree_hash::TreeHash; +use types::{BeaconState, DepositData, EthSpec, Hash256, SignatureBytes, DEPOSIT_TREE_DEPTH}; pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { let path = matches @@ -38,6 +40,8 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), let mnemonic = mnemonic_from_phrase(mnemonic_phrase)?; let seed = Seed::new(&mnemonic, ""); + let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); + let mut deposit_root = Hash256::zero(); for (index, validator) in state.validators_mut().iter_mut().enumerate() { let (secret, _) = recover_validator_secret_from_mnemonic(seed.as_bytes(), index as u32, KeyType::Voting) @@ -49,8 +53,29 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), eprintln!("{}: {}", index, keypair.pk); validator.pubkey = keypair.pk.into(); + + // Update the 
deposit tree. + let mut deposit_data = DepositData { + pubkey: validator.pubkey, + // Set this to a junk value since it's very time consuming to generate the withdrawal + // keys and it's not useful for the time being. + withdrawal_credentials: Hash256::zero(), + amount: spec.min_deposit_amount, + signature: SignatureBytes::empty(), + }; + deposit_data.signature = deposit_data.create_signature(&keypair.sk, spec); + deposit_tree + .push_leaf(deposit_data.tree_hash_root()) + .map_err(|e| format!("failed to create deposit tree: {:?}", e))?; + deposit_root = deposit_tree.root(); } + // Update the genesis validators root since we changed the validators. + *state.genesis_validators_root_mut() = state.validators().tree_hash_root(); + + // Update the deposit root with our simulated deposits. + state.eth1_data_mut().deposit_root = deposit_root; + let mut file = File::create(path).map_err(|e| format!("Unable to create file: {}", e))?; file.write_all(&state.as_ssz_bytes()) diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 04d15f5a11..f78c6b005e 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -1,7 +1,9 @@ use clap::ArgMatches; use eth2_network_config::Eth2NetworkConfig; use ssz::Encode; -use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy}; +use state_processing::{ + per_block_processing, per_slot_processing, BlockSignatureStrategy, VerifyBlockRoot, +}; use std::fs::File; use std::io::prelude::*; use std::path::{Path, PathBuf}; @@ -77,6 +79,7 @@ fn do_transition( &block, None, BlockSignatureStrategy::VerifyIndividual, + VerifyBlockRoot::True, spec, ) .map_err(|e| format!("State transition failed: {:?}", e))?; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index f3eec21d07..787b992a22 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.0.1" +version = "2.1.0" authors = ["Sigma Prime "] edition = 
"2018" autotests = false @@ -20,7 +20,7 @@ spec-minimal = [] [dependencies] beacon_node = { "path" = "../beacon_node" } slog = { version = "2.5.2", features = ["max_level_trace"] } -sloggers = "2.0.2" +sloggers = { version = "2.1.1", features = ["json"] } types = { "path" = "../consensus/types" } bls = { path = "../crypto/bls" } eth2_hashing = "0.2.0" @@ -41,6 +41,7 @@ serde = { version = "1.0.116", features = ["derive"] } serde_json = "1.0.59" task_executor = { path = "../common/task_executor" } malloc_utils = { path = "../common/malloc_utils" } +directory = { path = "../common/directory" } [dev-dependencies] tempfile = "3.1.0" diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 29fb173032..ee196e70f1 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] tokio = { version = "1.14.0", features = ["macros", "rt", "rt-multi-thread", "signal" ] } slog = { version = "2.5.2", features = ["max_level_trace"] } -sloggers = "2.0.2" +sloggers = { version = "2.1.1", features = ["json"] } types = { path = "../../consensus/types" } eth2_config = { path = "../../common/eth2_config" } task_executor = { path = "../../common/task_executor" } @@ -18,7 +18,6 @@ slog-async = "2.5.0" futures = "0.3.7" slog-json = "2.3.0" exit-future = "0.2.0" -filesystem = {"path" = "../../common/filesystem"} [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index f48433ec29..e536d3c95b 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -9,17 +9,16 @@ use eth2_config::Eth2Config; use eth2_network_config::Eth2NetworkConfig; -use filesystem::restrict_file_permissions; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; -use slog::{error, info, o, warn, Drain, Level, 
Logger}; -use sloggers::{null::NullLoggerBuilder, Build}; -use std::ffi::OsStr; -use std::fs::{rename as FsRename, OpenOptions}; +use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; +use sloggers::{ + file::FileLoggerBuilder, null::NullLoggerBuilder, types::Format, types::Severity, Build, +}; +use std::fs::create_dir_all; use std::path::PathBuf; use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; use types::{EthSpec, MainnetEthSpec, MinimalEthSpec}; @@ -38,13 +37,28 @@ const LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. const MAXIMUM_SHUTDOWN_TIME: u64 = 15; +/// Configuration for logging. +/// Background file logging is disabled if one of: +/// - `path` == None, +/// - `max_log_size` == 0, +/// - `max_log_number` == 0, +pub struct LoggerConfig<'a> { + pub path: Option, + pub debug_level: &'a str, + pub logfile_debug_level: &'a str, + pub log_format: Option<&'a str>, + pub max_log_size: u64, + pub max_log_number: usize, + pub compression: bool, +} + /// Builds an `Environment`. pub struct EnvironmentBuilder { runtime: Option>, log: Option, eth_spec_instance: E, eth2_config: Eth2Config, - testnet: Option, + eth2_network_config: Option, } impl EnvironmentBuilder { @@ -55,7 +69,7 @@ impl EnvironmentBuilder { log: None, eth_spec_instance: MinimalEthSpec, eth2_config: Eth2Config::minimal(), - testnet: None, + eth2_network_config: None, } } } @@ -68,7 +82,7 @@ impl EnvironmentBuilder { log: None, eth_spec_instance: MainnetEthSpec, eth2_config: Eth2Config::mainnet(), - testnet: None, + eth2_network_config: None, } } } @@ -93,118 +107,98 @@ impl EnvironmentBuilder { Ok(self) } - /// Specifies that the `slog` asynchronous logger should be used. Ideal for production. - /// + /// Initializes the logger using the specified configuration. 
/// The logger is "async" because it has a dedicated thread that accepts logs and then /// asynchronously flushes them to stdout/files/etc. This means the thread that raised the log /// does not have to wait for the logs to be flushed. - pub fn async_logger( - mut self, - debug_level: &str, - log_format: Option<&str>, - ) -> Result { - // Setting up the initial logger format and building it. - let drain = if let Some(format) = log_format { + /// The logger can be duplicated and more detailed logs can be output to `logfile`. + /// Note that background file logging will spawn a new thread. + pub fn initialize_logger(mut self, config: LoggerConfig) -> Result { + // Setting up the initial logger format and build it. + let stdout_drain = if let Some(format) = config.log_format { match format.to_uppercase().as_str() { "JSON" => { - let drain = slog_json::Json::default(std::io::stdout()).fuse(); - slog_async::Async::new(drain) + let stdout_drain = slog_json::Json::default(std::io::stdout()).fuse(); + slog_async::Async::new(stdout_drain) .chan_size(LOG_CHANNEL_SIZE) .build() } _ => return Err("Logging format provided is not supported".to_string()), } } else { - let decorator = slog_term::TermDecorator::new().build(); - let decorator = - logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - slog_async::Async::new(drain) + let stdout_decorator = slog_term::TermDecorator::new().build(); + let stdout_decorator = + logging::AlignedTermDecorator::new(stdout_decorator, logging::MAX_MESSAGE_WIDTH); + let stdout_drain = slog_term::FullFormat::new(stdout_decorator).build().fuse(); + slog_async::Async::new(stdout_drain) .chan_size(LOG_CHANNEL_SIZE) .build() }; - let drain = match debug_level { - "info" => drain.filter_level(Level::Info), - "debug" => drain.filter_level(Level::Debug), - "trace" => drain.filter_level(Level::Trace), - "warn" => drain.filter_level(Level::Warning), - "error" => 
drain.filter_level(Level::Error), - "crit" => drain.filter_level(Level::Critical), + let stdout_drain = match config.debug_level { + "info" => stdout_drain.filter_level(Level::Info), + "debug" => stdout_drain.filter_level(Level::Debug), + "trace" => stdout_drain.filter_level(Level::Trace), + "warn" => stdout_drain.filter_level(Level::Warning), + "error" => stdout_drain.filter_level(Level::Error), + "crit" => stdout_drain.filter_level(Level::Critical), unknown => return Err(format!("Unknown debug-level: {}", unknown)), }; - self.log = Some(Logger::root(drain.fuse(), o!())); - Ok(self) - } + let stdout_logger = Logger::root(stdout_drain.fuse(), o!()); - /// Sets the logger (and all child loggers) to log to a file. - pub fn log_to_file( - mut self, - path: PathBuf, - debug_level: &str, - log_format: Option<&str>, - ) -> Result { - // Creating a backup if the logfile already exists. - if path.exists() { - let start = SystemTime::now(); - let timestamp = start - .duration_since(UNIX_EPOCH) - .map_err(|e| e.to_string())? - .as_secs(); - let file_stem = path - .file_stem() - .ok_or("Invalid file name")? - .to_str() - .ok_or("Failed to create str from filename")?; - let file_ext = path.extension().unwrap_or_else(|| OsStr::new("")); - let backup_name = format!("{}_backup_{}", file_stem, timestamp); - let backup_path = path.with_file_name(backup_name).with_extension(file_ext); - FsRename(&path, &backup_path).map_err(|e| e.to_string())?; + // Disable file logging if values set to 0. + if config.max_log_size == 0 || config.max_log_number == 0 { + self.log = Some(stdout_logger); + return Ok(self); } - let file = OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(&path) - .map_err(|e| format!("Unable to open logfile: {:?}", e))?; - - restrict_file_permissions(&path) - .map_err(|e| format!("Unable to set file permissions for {:?}: {:?}", path, e))?; - - // Setting up the initial logger format and building it. 
- let drain = if let Some(format) = log_format { - match format.to_uppercase().as_str() { - "JSON" => { - let drain = slog_json::Json::default(file).fuse(); - slog_async::Async::new(drain) - .chan_size(LOG_CHANNEL_SIZE) - .build() - } - _ => return Err("Logging format provided is not supported".to_string()), + // Disable file logging if no path is specified. + let path = match config.path { + Some(path) => path, + None => { + self.log = Some(stdout_logger); + return Ok(self); + } - } - } else { - let decorator = slog_term::PlainDecorator::new(file); - let decorator = - logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - slog_async::Async::new(drain) - .chan_size(LOG_CHANNEL_SIZE) - .build() }; - let drain = match debug_level { - "info" => drain.filter_level(Level::Info), - "debug" => drain.filter_level(Level::Debug), - "trace" => drain.filter_level(Level::Trace), - "warn" => drain.filter_level(Level::Warning), - "error" => drain.filter_level(Level::Error), - "crit" => drain.filter_level(Level::Critical), - unknown => return Err(format!("Unknown debug-level: {}", unknown)), + // Ensure directories are created before the logfile. + if !path.exists() { + let mut dir = path.clone(); + dir.pop(); + + // Create the necessary directories for the correct service and network. 
+ if !dir.exists() { + create_dir_all(dir).map_err(|e| format!("Unable to create directory: {:?}", e))?; + } + } + + let logfile_level = match config.logfile_debug_level { + "info" => Severity::Info, + "debug" => Severity::Debug, + "trace" => Severity::Trace, + "warn" => Severity::Warning, + "error" => Severity::Error, + "crit" => Severity::Critical, + unknown => return Err(format!("Unknown loglevel-debug-level: {}", unknown)), }; - let log = Logger::root(drain.fuse(), o!()); + let file_logger = FileLoggerBuilder::new(&path) + .level(logfile_level) + .channel_size(LOG_CHANNEL_SIZE) + .format(match config.log_format { + Some("JSON") => Format::Json, + _ => Format::default(), + }) + .rotate_size(config.max_log_size) + .rotate_keep(config.max_log_number) + .rotate_compress(config.compression) + .restrict_permissions(true) + .build() + .map_err(|e| format!("Unable to build file logger: {}", e))?; + + let log = Logger::root(Duplicate::new(stdout_logger, file_logger).fuse(), o!()); + info!( log, "Logging to file"; @@ -216,19 +210,19 @@ impl EnvironmentBuilder { Ok(self) } - /// Adds a testnet configuration to the environment. + /// Adds a network configuration to the environment. pub fn eth2_network_config( mut self, eth2_network_config: Eth2NetworkConfig, ) -> Result { // Create a new chain spec from the default configuration. self.eth2_config.spec = eth2_network_config.chain_spec::()?; - self.testnet = Some(eth2_network_config); + self.eth2_network_config = Some(eth2_network_config); Ok(self) } - /// Optionally adds a testnet configuration to the environment. + /// Optionally adds a network configuration to the environment. 
pub fn optional_eth2_network_config( self, optional_config: Option, @@ -255,7 +249,7 @@ impl EnvironmentBuilder { log: self.log.ok_or("Cannot build environment without log")?, eth_spec_instance: self.eth_spec_instance, eth2_config: self.eth2_config, - testnet: self.testnet, + eth2_network_config: self.eth2_network_config.map(Arc::new), }) } } @@ -269,6 +263,7 @@ pub struct RuntimeContext { pub executor: TaskExecutor, pub eth_spec_instance: E, pub eth2_config: Eth2Config, + pub eth2_network_config: Option>, } impl RuntimeContext { @@ -280,6 +275,7 @@ impl RuntimeContext { executor: self.executor.clone_with_name(service_name), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), + eth2_network_config: self.eth2_network_config.clone(), } } @@ -307,7 +303,7 @@ pub struct Environment { log: Logger, eth_spec_instance: E, pub eth2_config: Eth2Config, - pub testnet: Option, + pub eth2_network_config: Option>, } impl Environment { @@ -330,6 +326,7 @@ impl Environment { ), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), + eth2_network_config: self.eth2_network_config.clone(), } } @@ -344,6 +341,7 @@ impl Environment { ), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), + eth2_network_config: self.eth2_network_config.clone(), } } diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 7d0105cca8..8424a2fdc3 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -3,6 +3,14 @@ # Extends the mainnet preset PRESET_BASE: 'mainnet' +# Transition +# --------------------------------------------------------------- +# TBD, 2**256-2**10 is a placeholder +TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# By default, don't use these params +TERMINAL_BLOCK_HASH: 
0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + # Genesis # --------------------------------------------------------------- # CUSTOMISED FOR TEST @@ -25,8 +33,8 @@ GENESIS_DELAY: 604800 ALTAIR_FORK_VERSION: 0x01000000 ALTAIR_FORK_EPOCH: 18446744073709551615 # Merge -MERGE_FORK_VERSION: 0x02000000 -MERGE_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_VERSION: 0x02000000 +BELLATRIX_FORK_EPOCH: 18446744073709551615 # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 @@ -62,6 +70,10 @@ MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 +# Fork choice +# --------------------------------------------------------------- +# 70% +PROPOSER_SCORE_BOOST: 70 # Deposit contract # --------------------------------------------------------------- diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 99775d71d5..693b3de821 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -2,11 +2,12 @@ mod metrics; -use beacon_node::{get_eth2_network_config, ProductionBeaconNode}; +use beacon_node::ProductionBeaconNode; use clap::{App, Arg, ArgMatches}; -use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; +use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, get_eth2_network_config}; +use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR}; use env_logger::{Builder, Env}; -use environment::EnvironmentBuilder; +use environment::{EnvironmentBuilder, LoggerConfig}; use eth2_hashing::have_sha_extensions; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; use lighthouse_version::VERSION; @@ -80,23 +81,68 @@ fn main() { .long("logfile") .value_name("FILE") .help( - "File path where output will be written.", - ) - .takes_value(true), + "File path where the log file will be stored. 
Once it grows to the \ + value specified in `--logfile-max-size` a new log file is generated where \ + future logs are stored. \ + Once the number of log files exceeds the value specified in \ + `--logfile-max-number` the oldest log file will be overwritten.") + .takes_value(true) + .global(true), + ) + .arg( + Arg::with_name("logfile-debug-level") + .long("logfile-debug-level") + .value_name("LEVEL") + .help("The verbosity level used when emitting logs to the log file.") + .takes_value(true) + .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .default_value("debug") + .global(true), + ) + .arg( + Arg::with_name("logfile-max-size") + .long("logfile-max-size") + .value_name("SIZE") + .help( + "The maximum size (in MB) each log file can grow to before rotating. If set \ + to 0, background file logging is disabled.") + .takes_value(true) + .default_value("200") + .global(true), + ) + .arg( + Arg::with_name("logfile-max-number") + .long("logfile-max-number") + .value_name("COUNT") + .help( + "The maximum number of log files that will be stored. If set to 0, \ + background file logging is disabled.") + .takes_value(true) + .default_value("5") + .global(true), + ) + .arg( + Arg::with_name("logfile-compress") + .long("logfile-compress") + .help( + "If present, compress old log files. 
This can help reduce the space needed \ + to store old logs.") + .global(true), ) .arg( Arg::with_name("log-format") .long("log-format") .value_name("FORMAT") - .help("Specifies the format used for logging.") + .help("Specifies the log format used when emitting logs to the terminal.") .possible_values(&["JSON"]) - .takes_value(true), + .takes_value(true) + .global(true), ) .arg( Arg::with_name("debug-level") .long("debug-level") .value_name("LEVEL") - .help("The verbosity level for emitting logs.") + .help("Specifies the verbosity level used when emitting logs to the terminal.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) .global(true) @@ -165,6 +211,45 @@ fn main() { ) .global(true), ) + .arg( + Arg::with_name("terminal-total-difficulty-override") + .long("terminal-total-difficulty-override") + .value_name("INTEGER") + .help("Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. \ + Accepts a 256-bit decimal integer (not a hex value). \ + This flag should only be used if the user has a clear understanding that \ + the broad Ethereum community has elected to override the terminal difficulty. \ + Incorrect use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag.") + .takes_value(true) + .global(true) + ) + .arg( + Arg::with_name("terminal-block-hash-override") + .long("terminal-block-hash-override") + .value_name("TERMINAL_BLOCK_HASH") + .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ + This flag should only be used if the user has a clear understanding that \ + the broad Ethereum community has elected to override the terminal PoW block. \ + Incorrect use of this flag will cause your node to experience a consensus + failure. 
Be extremely careful with this flag.") + .requires("terminal-block-hash-epoch-override") + .takes_value(true) + .global(true) + ) + .arg( + Arg::with_name("terminal-block-hash-epoch-override") + .long("terminal-block-hash-epoch-override") + .value_name("EPOCH") + .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ + parameter. This flag should only be used if the user has a clear understanding \ + that the broad Ethereum community has elected to override the terminal PoW block. \ + Incorrect use of this flag will cause your node to experience a consensus + failure. Be extremely careful with this flag.") + .requires("terminal-block-hash-override") + .takes_value(true) + .global(true) + ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) .subcommand(validator_client::cli_app()) @@ -193,8 +278,8 @@ fn main() { Builder::from_env(Env::default()).init(); } - let result = get_eth2_network_config(&matches).and_then(|testnet_config| { - let eth_spec_id = testnet_config.eth_spec_id()?; + let result = get_eth2_network_config(&matches).and_then(|eth2_network_config| { + let eth_spec_id = eth2_network_config.eth_spec_id()?; // boot node subcommand circumvents the environment if let Some(bootnode_matches) = matches.subcommand_matches("boot_node") { @@ -204,15 +289,21 @@ fn main() { .expect("Debug-level must be present") .into(); - boot_node::run(&matches, bootnode_matches, eth_spec_id, debug_info); + boot_node::run( + &matches, + bootnode_matches, + eth_spec_id, + ð2_network_config, + debug_info, + ); return Ok(()); } match eth_spec_id { - EthSpecId::Mainnet => run(EnvironmentBuilder::mainnet(), &matches, testnet_config), + EthSpecId::Mainnet => run(EnvironmentBuilder::mainnet(), &matches, eth2_network_config), #[cfg(feature = "spec-minimal")] - EthSpecId::Minimal => run(EnvironmentBuilder::minimal(), &matches, testnet_config), + EthSpecId::Minimal => run(EnvironmentBuilder::minimal(), &matches, eth2_network_config), 
#[cfg(not(feature = "spec-minimal"))] other => { eprintln!( @@ -242,7 +333,7 @@ fn main() { fn run( environment_builder: EnvironmentBuilder, matches: &ArgMatches, - testnet_config: Eth2NetworkConfig, + eth2_network_config: Eth2NetworkConfig, ) -> Result<(), String> { if std::mem::size_of::() != 8 { return Err(format!( @@ -257,18 +348,61 @@ fn run( let log_format = matches.value_of("log-format"); - let builder = if let Some(log_path) = matches.value_of("logfile") { - let path = log_path - .parse::() - .map_err(|e| format!("Failed to parse log path: {:?}", e))?; - environment_builder.log_to_file(path, debug_level, log_format)? - } else { - environment_builder.async_logger(debug_level, log_format)? + let logfile_debug_level = matches + .value_of("logfile-debug-level") + .ok_or("Expected --logfile-debug-level flag")?; + + let logfile_max_size: u64 = matches + .value_of("logfile-max-size") + .ok_or("Expected --logfile-max-size flag")? + .parse() + .map_err(|e| format!("Failed to parse `logfile-max-size`: {:?}", e))?; + + let logfile_max_number: usize = matches + .value_of("logfile-max-number") + .ok_or("Expected --logfile-max-number flag")? + .parse() + .map_err(|e| format!("Failed to parse `logfile-max-number`: {:?}", e))?; + + let logfile_compress = matches.is_present("logfile-compress"); + + // Construct the path to the log file. + let mut log_path: Option = clap_utils::parse_optional(matches, "logfile")?; + if log_path.is_none() { + log_path = match matches.subcommand_name() { + Some("beacon_node") => Some( + parse_path_or_default(matches, "datadir")? + .join(DEFAULT_BEACON_NODE_DIR) + .join("logs") + .join("beacon") + .with_extension("log"), + ), + Some("validator_client") => Some( + parse_path_or_default(matches, "datadir")? 
+ .join(DEFAULT_VALIDATOR_DIR) + .join("logs") + .join("validator") + .with_extension("log"), + ), + _ => None, + }; + } + + let logger_config = LoggerConfig { + path: log_path, + debug_level, + logfile_debug_level, + log_format, + max_log_size: logfile_max_size * 1_024 * 1_024, + max_log_number: logfile_max_number, + compression: logfile_compress, }; + let builder = environment_builder.initialize_logger(logger_config)?; + let mut environment = builder .multi_threaded_tokio_runtime()? - .optional_eth2_network_config(Some(testnet_config))? + .optional_eth2_network_config(Some(eth2_network_config))? .build()?; let log = environment.core_context().log().clone(); @@ -335,11 +469,7 @@ fn run( let context = environment.core_context(); let log = context.log().clone(); let executor = context.executor.clone(); - let config = beacon_node::get_config::( - matches, - &context.eth2_config().spec, - context.log().clone(), - )?; + let config = beacon_node::get_config::(matches, &context)?; let shutdown_flag = matches.is_present("immediate-shutdown"); if let Some(dump_path) = clap_utils::parse_optional::(matches, "dump-config")? 
{ diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index d985a3d1a7..96be44fcad 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -22,7 +22,7 @@ use std::env; use std::fs::{self, File}; use std::io::{BufRead, BufReader, Write}; use std::path::{Path, PathBuf}; -use std::process::{Command, Output, Stdio}; +use std::process::{Child, Command, Output, Stdio}; use std::str::from_utf8; use tempfile::{tempdir, TempDir}; use types::{Keypair, PublicKey}; @@ -528,6 +528,128 @@ fn validator_import_launchpad() { ); } +#[test] +fn validator_import_launchpad_no_password_then_add_password() { + const PASSWORD: &str = "cats"; + const KEYSTORE_NAME: &str = "keystore-m_12381_3600_0_0_0-1595406747.json"; + const NOT_KEYSTORE_NAME: &str = "keystore-m_12381_3600_0_0-1595406747.json"; + + let src_dir = tempdir().unwrap(); + let dst_dir = tempdir().unwrap(); + + let keypair = Keypair::random(); + let keystore = KeystoreBuilder::new(&keypair, PASSWORD.as_bytes(), "".into()) + .unwrap() + .build() + .unwrap(); + + let dst_keystore_dir = dst_dir.path().join(format!("0x{}", keystore.pubkey())); + + // Create a keystore in the src dir. + File::create(src_dir.path().join(KEYSTORE_NAME)) + .map(|mut file| keystore.to_json_writer(&mut file).unwrap()) + .unwrap(); + + // Create a not-keystore file in the src dir. + File::create(src_dir.path().join(NOT_KEYSTORE_NAME)).unwrap(); + + let validator_import_key_cmd = || { + validator_cmd() + .arg(format!("--{}", VALIDATOR_DIR_FLAG)) + .arg(dst_dir.path().as_os_str()) + .arg(IMPORT_CMD) + .arg(format!("--{}", STDIN_INPUTS_FLAG)) // Using tty does not work well with tests. 
+ .arg(format!("--{}", import::DIR_FLAG)) + .arg(src_dir.path().as_os_str()) + .stderr(Stdio::piped()) + .stdin(Stdio::piped()) + .spawn() + .unwrap() + }; + + let wait_for_password_prompt = |child: &mut Child| { + let mut stderr = child.stderr.as_mut().map(BufReader::new).unwrap().lines(); + + loop { + if stderr.next().unwrap().unwrap() == import::PASSWORD_PROMPT { + break; + } + } + }; + + let mut child = validator_import_key_cmd(); + wait_for_password_prompt(&mut child); + let stdin = child.stdin.as_mut().unwrap(); + stdin.write("\n".as_bytes()).unwrap(); + child.wait().unwrap(); + + assert!( + src_dir.path().join(KEYSTORE_NAME).exists(), + "keystore should not be removed from src dir" + ); + assert!( + src_dir.path().join(NOT_KEYSTORE_NAME).exists(), + "not-keystore should not be removed from src dir." + ); + + let voting_keystore_path = dst_keystore_dir.join(KEYSTORE_NAME); + + assert!( + voting_keystore_path.exists(), + "keystore should be present in dst dir" + ); + assert!( + !dst_dir.path().join(NOT_KEYSTORE_NAME).exists(), + "not-keystore should not be present in dst dir" + ); + + // Validator should be registered with slashing protection. 
+ check_slashing_protection(&dst_dir, std::iter::once(keystore.public_key().unwrap())); + + let defs = ValidatorDefinitions::open(&dst_dir).unwrap(); + + let expected_def = ValidatorDefinition { + enabled: true, + description: "".into(), + graffiti: None, + voting_public_key: keystore.public_key().unwrap(), + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path, + voting_keystore_password_path: None, + voting_keystore_password: None, + }, + }; + + assert!( + defs.as_slice() == &[expected_def.clone()], + "validator defs file should be accurate" + ); + + let mut child = validator_import_key_cmd(); + wait_for_password_prompt(&mut child); + let stdin = child.stdin.as_mut().unwrap(); + stdin.write(format!("{}\n", PASSWORD).as_bytes()).unwrap(); + child.wait().unwrap(); + + let expected_def = ValidatorDefinition { + enabled: true, + description: "".into(), + graffiti: None, + voting_public_key: keystore.public_key().unwrap(), + signing_definition: SigningDefinition::LocalKeystore { + voting_keystore_path: dst_keystore_dir.join(KEYSTORE_NAME), + voting_keystore_password_path: None, + voting_keystore_password: Some(ZeroizeString::from(PASSWORD.to_string())), + }, + }; + + let defs = ValidatorDefinitions::open(&dst_dir).unwrap(); + assert!( + defs.as_slice() == &[expected_def.clone()], + "validator defs file should be accurate" + ); +} + #[test] fn validator_import_launchpad_password_file() { const PASSWORD: &str = "cats"; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index b8dd31beb5..6d03cafe10 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -11,7 +11,7 @@ use std::process::Command; use std::str::FromStr; use std::string::ToString; use tempfile::TempDir; -use types::{Checkpoint, Epoch, Hash256}; +use types::{Address, Checkpoint, Epoch, Hash256}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -206,6 +206,24 @@ fn eth1_purge_cache_flag() { .with_config(|config| 
assert!(config.eth1.purge_cache)); } +// Tests for Merge flags. +#[test] +fn merge_fee_recipient_flag() { + CommandLineTest::new() + .flag("merge", None) + .flag( + "fee-recipient", + Some("0x00000000219ab540356cbb839cbe05303d7705fa"), + ) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.suggested_fee_recipient, + Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap()) + ) + }); +} + // Tests for Network flags. #[test] fn network_dir_flag() { @@ -670,7 +688,6 @@ fn no_reconstruct_historic_states_flag() { fn slasher_flag() { CommandLineTest::new() .flag("slasher", None) - .flag("slasher-max-db-size", Some("16")) .run_with_zero_port() .with_config_and_dir(|config, dir| { if let Some(slasher_config) = &config.slasher { @@ -689,7 +706,6 @@ fn slasher_dir_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-dir", dir.path().as_os_str().to_str()) - .flag("slasher-max-db-size", Some("16")) .run_with_zero_port() .with_config(|config| { if let Some(slasher_config) = &config.slasher { @@ -703,7 +719,6 @@ fn slasher_dir_flag() { fn slasher_update_period_flag() { CommandLineTest::new() .flag("slasher", None) - .flag("slasher-max-db-size", Some("16")) .flag("slasher-update-period", Some("100")) .run_with_zero_port() .with_config(|config| { @@ -715,21 +730,21 @@ fn slasher_update_period_flag() { }); } #[test] -fn slasher_slot_offset() { - // TODO: check that the offset is actually stored, once the config is un-hacked - // See: https://github.com/sigp/lighthouse/pull/2767#discussion_r741610402 +fn slasher_slot_offset_flag() { CommandLineTest::new() .flag("slasher", None) - .flag("slasher-max-db-size", Some("16")) .flag("slasher-slot-offset", Some("11.25")) - .run(); + .run() + .with_config(|config| { + let slasher_config = config.slasher.as_ref().unwrap(); + assert_eq!(slasher_config.slot_offset, 11.25); + }); } #[test] #[should_panic] -fn slasher_slot_offset_nan() { +fn slasher_slot_offset_nan_flag() { 
CommandLineTest::new() .flag("slasher", None) - .flag("slasher-max-db-size", Some("16")) .flag("slasher-slot-offset", Some("NaN")) .run(); } @@ -737,7 +752,6 @@ fn slasher_slot_offset_nan() { fn slasher_history_length_flag() { CommandLineTest::new() .flag("slasher", None) - .flag("slasher-max-db-size", Some("16")) .flag("slasher-history-length", Some("2048")) .run_with_zero_port() .with_config(|config| { @@ -763,11 +777,24 @@ fn slasher_max_db_size_flag() { }); } #[test] +fn slasher_attestation_cache_size_flag() { + CommandLineTest::new() + .flag("slasher", None) + .flag("slasher-att-cache-size", Some("10000")) + .run() + .with_config(|config| { + let slasher_config = config + .slasher + .as_ref() + .expect("Unable to parse Slasher config"); + assert_eq!(slasher_config.attestation_root_cache_size, 10000); + }); +} +#[test] fn slasher_chunk_size_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-chunk-size", Some("32")) - .flag("slasher-max-db-size", Some("16")) .run_with_zero_port() .with_config(|config| { let slasher_config = config @@ -781,7 +808,6 @@ fn slasher_chunk_size_flag() { fn slasher_validator_chunk_size_flag() { CommandLineTest::new() .flag("slasher", None) - .flag("slasher-max-db-size", Some("16")) .flag("slasher-validator-chunk-size", Some("512")) .run_with_zero_port() .with_config(|config| { @@ -797,7 +823,6 @@ fn slasher_broadcast_flag() { CommandLineTest::new() .flag("slasher", None) .flag("slasher-broadcast", None) - .flag("slasher-max-db-size", Some("16")) .run_with_zero_port() .with_config(|config| { let slasher_config = config diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 04437aca9f..7b3c3acb3c 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ -1,8 +1,8 @@ use boot_node::config::BootNodeConfigSerialization; use crate::exec::{CommandLineTestExec, CompletedTest}; -use beacon_node::get_eth2_network_config; use clap::ArgMatches; +use 
clap_utils::get_eth2_network_config; use lighthouse_network::discovery::ENR_FILENAME; use lighthouse_network::Enr; use std::fs::File; @@ -139,9 +139,25 @@ fn enr_port_flag() { }) } -// TODO add tests for flags `enable-enr-auto-update` and `disable-packet-filter`. -// -// These options end up in `Discv5Config`, which doesn't support serde (de)serialization. +#[test] +fn disable_packet_filter_flag() { + CommandLineTest::new() + .flag("disable-packet-filter", None) + .run_with_ip() + .with_config(|config| { + assert_eq!(config.disable_packet_filter, true); + }); +} + +#[test] +fn enable_enr_auto_update_flag() { + CommandLineTest::new() + .flag("enable-enr-auto-update", None) + .run_with_ip() + .with_config(|config| { + assert_eq!(config.enable_enr_auto_update, true); + }); +} #[test] fn network_dir_flag() { diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index b51b38f0fd..c319c2de1a 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -7,22 +7,22 @@ edition = "2018" [dependencies] bincode = "1.3.1" byteorder = "1.3.4" -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } filesystem = { path = "../common/filesystem" } -lmdb = "0.8" -lmdb-sys = "0.8" +mdbx = { package = "libmdbx", version = "0.1.0" } +lru = "0.7.1" parking_lot = "0.11.0" rand = "0.7.3" safe_arith = { path = "../consensus/safe_arith" } serde = "1.0" serde_derive = "1.0" slog = "2.5.2" -sloggers = "2.0.2" -tree_hash = "0.4.0" +sloggers = { version = "2.1.1", features = ["json"] } +tree_hash = "0.4.1" tree_hash_derive = "0.4.0" types = { path = "../consensus/types" } diff --git a/slasher/service/src/service.rs b/slasher/service/src/service.rs index 510ed6cd98..88feff0bbc 100644 --- a/slasher/service/src/service.rs +++ b/slasher/service/src/service.rs @@ -128,7 +128,7 @@ impl SlasherService { log, "Error during scheduled 
slasher processing"; "epoch" => current_epoch, - "error" => format!("{:?}", e) + "error" => ?e, ); None } @@ -136,13 +136,13 @@ impl SlasherService { drop(batch_timer); // Prune the database, even in the case where batch processing failed. - // If the LMDB database is full then pruning could help to free it up. + // If the database is full then pruning could help to free it up. if let Err(e) = slasher.prune_database(current_epoch) { error!( log, "Error during slasher database pruning"; "epoch" => current_epoch, - "error" => format!("{:?}", e), + "error" => ?e, ); continue; }; diff --git a/slasher/src/array.rs b/slasher/src/array.rs index 545c0b7e6f..d9f1fab819 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -1,8 +1,9 @@ use crate::metrics::{self, SLASHER_COMPRESSION_RATIO, SLASHER_NUM_CHUNKS_UPDATED}; +use crate::RwTransaction; use crate::{AttesterSlashingStatus, Config, Error, IndexedAttesterRecord, SlasherDB}; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; -use lmdb::{RwTransaction, Transaction}; use serde_derive::{Deserialize, Serialize}; +use std::borrow::{Borrow, Cow}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::convert::TryFrom; use std::io::Read; @@ -146,7 +147,10 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch; - fn select_db(db: &SlasherDB) -> lmdb::Database; + fn select_db<'txn, E: EthSpec>( + db: &SlasherDB, + txn: &'txn RwTransaction<'txn>, + ) -> Result, Error>; fn load( db: &SlasherDB, @@ -156,13 +160,13 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn config: &Config, ) -> Result, Error> { let disk_key = config.disk_key(validator_chunk_index, chunk_index); - let chunk_bytes = match txn.get(Self::select_db(db), &disk_key.to_be_bytes()) { - Ok(chunk_bytes) => chunk_bytes, - Err(lmdb::Error::NotFound) => return Ok(None), - Err(e) => return Err(e.into()), - }; + let 
chunk_bytes: Cow<[u8]> = + match txn.get(&Self::select_db(db, txn)?, &disk_key.to_be_bytes())? { + Some(chunk_bytes) => chunk_bytes, + None => return Ok(None), + }; - let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes))?; + let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes.borrow()))?; Ok(Some(chunk)) } @@ -185,7 +189,7 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn metrics::set_float_gauge(&SLASHER_COMPRESSION_RATIO, compression_ratio); txn.put( - Self::select_db(db), + &Self::select_db(db, txn)?, &disk_key.to_be_bytes(), &compressed_value, SlasherDB::::write_flags(), @@ -292,8 +296,11 @@ impl TargetArrayChunk for MinTargetChunk { start_epoch / chunk_size * chunk_size - 1 } - fn select_db(db: &SlasherDB) -> lmdb::Database { - db.min_targets_db + fn select_db<'txn, E: EthSpec>( + db: &SlasherDB, + txn: &'txn RwTransaction<'txn>, + ) -> Result, Error> { + db.min_targets_db(txn) } } @@ -391,8 +398,11 @@ impl TargetArrayChunk for MaxTargetChunk { (start_epoch / chunk_size + 1) * chunk_size } - fn select_db(db: &SlasherDB) -> lmdb::Database { - db.max_targets_db + fn select_db<'txn, E: EthSpec>( + db: &SlasherDB, + txn: &'txn RwTransaction<'txn>, + ) -> Result, Error> { + db.max_targets_db(txn) } } diff --git a/slasher/src/attester_record.rs b/slasher/src/attester_record.rs index 310118e1ae..498e8d49f0 100644 --- a/slasher/src/attester_record.rs +++ b/slasher/src/attester_record.rs @@ -1,17 +1,53 @@ +use crate::{database::IndexedAttestationId, Error}; use ssz_derive::{Decode, Encode}; -use std::sync::Arc; +use std::borrow::Cow; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, +}; use tree_hash::TreeHash as _; use tree_hash_derive::TreeHash; use types::{AggregateSignature, EthSpec, Hash256, IndexedAttestation, VariableList}; -#[derive(Debug, Clone, Copy, Encode, Decode)] +#[derive(Debug, Clone, Copy)] pub struct AttesterRecord { - /// The hash of the attestation data, for checking 
double-voting. + /// The hash of the attestation data, for de-duplication. pub attestation_data_hash: Hash256, /// The hash of the indexed attestation, so it can be loaded. pub indexed_attestation_hash: Hash256, } +#[derive(Debug, Clone, Copy)] +pub struct CompactAttesterRecord { + /// The ID of the `IndexedAttestation` signed by this validator. + pub indexed_attestation_id: IndexedAttestationId, +} + +impl CompactAttesterRecord { + pub fn new(indexed_attestation_id: IndexedAttestationId) -> Self { + Self { + indexed_attestation_id, + } + } + + pub fn null() -> Self { + Self::new(IndexedAttestationId::null()) + } + + pub fn parse(bytes: Cow<[u8]>) -> Result { + let id = IndexedAttestationId::parse(bytes)?; + Ok(Self::new(IndexedAttestationId::new(id))) + } + + pub fn is_null(&self) -> bool { + self.indexed_attestation_id.is_null() + } + + pub fn as_bytes(&self) -> &[u8] { + self.indexed_attestation_id.as_ref() + } +} + /// Bundling of an `IndexedAttestation` with an `AttesterRecord`. /// /// This struct gets `Arc`d and passed around between each stage of queueing and processing. 
@@ -19,11 +55,26 @@ pub struct AttesterRecord { pub struct IndexedAttesterRecord { pub indexed: IndexedAttestation, pub record: AttesterRecord, + pub indexed_attestation_id: AtomicU64, } impl IndexedAttesterRecord { pub fn new(indexed: IndexedAttestation, record: AttesterRecord) -> Arc { - Arc::new(IndexedAttesterRecord { indexed, record }) + Arc::new(IndexedAttesterRecord { + indexed, + record, + indexed_attestation_id: AtomicU64::new(0), + }) + } + + pub fn set_id(&self, id: u64) { + self.indexed_attestation_id + .compare_exchange(0, id, Ordering::Relaxed, Ordering::Relaxed) + .expect("IDs should only be initialized once"); + } + + pub fn get_id(&self) -> u64 { + self.indexed_attestation_id.load(Ordering::Relaxed) } } diff --git a/slasher/src/config.rs b/slasher/src/config.rs index f8fcc1c02b..81aa4b597d 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -9,14 +9,11 @@ pub const DEFAULT_HISTORY_LENGTH: usize = 4096; pub const DEFAULT_UPDATE_PERIOD: u64 = 12; pub const DEFAULT_SLOT_OFFSET: f64 = 10.5; pub const DEFAULT_MAX_DB_SIZE: usize = 256 * 1024; // 256 GiB +pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: usize = 100_000; pub const DEFAULT_BROADCAST: bool = false; -/// Database size to use for tests. -/// -/// Mostly a workaround for Windows due to a bug in LMDB, see: -/// -/// https://github.com/sigp/lighthouse/issues/2342 -pub const TESTING_MAX_DB_SIZE: usize = 16; // MiB +pub const MAX_HISTORY_LENGTH: usize = 1 << 16; +pub const MDBX_GROWTH_STEP: isize = 256 * (1 << 20); // 256 MiB #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -28,16 +25,21 @@ pub struct Config { /// Update frequency in seconds. pub update_period: u64, /// Offset from the start of the slot to begin processing. - #[serde(skip, default = "default_slot_offset")] pub slot_offset: f64, - /// Maximum size of the LMDB database in megabytes. + /// Maximum size of the database in megabytes. 
pub max_db_size_mbs: usize, + /// Maximum size of the in-memory cache for attestation roots. + pub attestation_root_cache_size: usize, /// Whether to broadcast slashings found to the network. pub broadcast: bool, } -fn default_slot_offset() -> f64 { - DEFAULT_SLOT_OFFSET +/// Immutable configuration parameters which are stored on disk and checked for consistency. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct DiskConfig { + pub chunk_size: usize, + pub validator_chunk_size: usize, + pub history_length: usize, } impl Config { @@ -50,16 +52,11 @@ impl Config { update_period: DEFAULT_UPDATE_PERIOD, slot_offset: DEFAULT_SLOT_OFFSET, max_db_size_mbs: DEFAULT_MAX_DB_SIZE, + attestation_root_cache_size: DEFAULT_ATTESTATION_ROOT_CACHE_SIZE, broadcast: DEFAULT_BROADCAST, } } - /// Use a smaller max DB size for testing. - pub fn for_testing(mut self) -> Self { - self.max_db_size_mbs = TESTING_MAX_DB_SIZE; - self - } - pub fn validate(&self) -> Result<(), Error> { if self.chunk_size == 0 || self.validator_chunk_size == 0 @@ -74,15 +71,22 @@ impl Config { chunk_size: self.chunk_size, history_length: self.history_length, }) + } else if self.history_length > MAX_HISTORY_LENGTH { + Err(Error::ConfigInvalidHistoryLength { + history_length: self.history_length, + max_history_length: MAX_HISTORY_LENGTH, + }) } else { Ok(()) } } - pub fn is_compatible(&self, other: &Config) -> bool { - self.chunk_size == other.chunk_size - && self.validator_chunk_size == other.validator_chunk_size - && self.history_length == other.history_length + pub fn disk_config(&self) -> DiskConfig { + DiskConfig { + chunk_size: self.chunk_size, + validator_chunk_size: self.validator_chunk_size, + history_length: self.history_length, + } } pub fn chunk_index(&self, epoch: Epoch) -> usize { diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 7576d18483..653eccfa72 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -1,26 +1,41 @@ +use 
crate::config::MDBX_GROWTH_STEP; use crate::{ - utils::{TxnMapFull, TxnOptional}, - AttesterRecord, AttesterSlashingStatus, Config, Error, ProposerSlashingStatus, + metrics, utils::TxnMapFull, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, + Config, Environment, Error, ProposerSlashingStatus, RwTransaction, }; use byteorder::{BigEndian, ByteOrder}; -use lmdb::{Cursor, Database, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags}; -use serde::Deserialize; +use lru::LruCache; +use mdbx::{Database, DatabaseFlags, Geometry, WriteFlags}; +use parking_lot::Mutex; +use serde::de::DeserializeOwned; +use slog::{info, Logger}; use ssz::{Decode, Encode}; +use std::borrow::{Borrow, Cow}; use std::marker::PhantomData; +use std::ops::Range; +use std::path::Path; use std::sync::Arc; +use tree_hash::TreeHash; use types::{ Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, SignedBeaconBlockHeader, Slot, }; /// Current database schema version, to check compatibility of on-disk DB with software. -pub const CURRENT_SCHEMA_VERSION: u64 = 2; +pub const CURRENT_SCHEMA_VERSION: u64 = 3; /// Metadata about the slashing database itself. const METADATA_DB: &str = "metadata"; -/// Map from `(target_epoch, validator_index)` to `AttesterRecord`. +/// Map from `(target_epoch, validator_index)` to `CompactAttesterRecord`. const ATTESTERS_DB: &str = "attesters"; -/// Map from `(target_epoch, indexed_attestation_hash)` to `IndexedAttestation`. +/// Companion database for the attesters DB mapping `validator_index` to largest `target_epoch` +/// stored for that validator in the attesters DB. +/// +/// Used to implement wrap-around semantics for target epochs modulo the history length. +const ATTESTERS_MAX_TARGETS_DB: &str = "attesters_max_targets"; +/// Map from `indexed_attestation_id` to `IndexedAttestation`. const INDEXED_ATTESTATION_DB: &str = "indexed_attestations"; +/// Map from `(target_epoch, indexed_attestation_hash)` to `indexed_attestation_id`. 
+const INDEXED_ATTESTATION_ID_DB: &str = "indexed_attestation_ids"; /// Table of minimum targets for every source epoch within range. const MIN_TARGETS_DB: &str = "min_targets"; /// Table of maximum targets for every source epoch within range. @@ -32,31 +47,31 @@ const CURRENT_EPOCHS_DB: &str = "current_epochs"; /// Map from `(slot, validator_index)` to `SignedBeaconBlockHeader`. const PROPOSERS_DB: &str = "proposers"; -/// The number of DBs for LMDB to use (equal to the number of DBs defined above). -const LMDB_MAX_DBS: u32 = 7; +/// The number of DBs for MDBX to use (equal to the number of DBs defined above). +const MAX_NUM_DBS: usize = 9; + +/// Filename for the legacy (LMDB) database file, so that it may be deleted. +const LEGACY_DB_FILENAME: &str = "data.mdb"; +const LEGACY_DB_LOCK_FILENAME: &str = "lock.mdb"; /// Constant key under which the schema version is stored in the `metadata_db`. const METADATA_VERSION_KEY: &[u8] = &[0]; /// Constant key under which the slasher configuration is stored in the `metadata_db`. const METADATA_CONFIG_KEY: &[u8] = &[1]; -const ATTESTER_KEY_SIZE: usize = 16; +const ATTESTER_KEY_SIZE: usize = 7; const PROPOSER_KEY_SIZE: usize = 16; const CURRENT_EPOCH_KEY_SIZE: usize = 8; -const INDEXED_ATTESTATION_KEY_SIZE: usize = 40; +const INDEXED_ATTESTATION_ID_SIZE: usize = 6; +const INDEXED_ATTESTATION_ID_KEY_SIZE: usize = 40; const MEGABYTE: usize = 1 << 20; #[derive(Debug)] pub struct SlasherDB { pub(crate) env: Environment, - pub(crate) indexed_attestation_db: Database, - pub(crate) attesters_db: Database, - pub(crate) min_targets_db: Database, - pub(crate) max_targets_db: Database, - pub(crate) current_epochs_db: Database, - pub(crate) proposers_db: Database, - pub(crate) metadata_db: Database, - config: Arc, + /// LRU cache mapping indexed attestation IDs to their attestation data roots. 
+ attestation_root_cache: Mutex>, + pub(crate) config: Arc, _phantom: PhantomData, } @@ -64,27 +79,27 @@ pub struct SlasherDB { /// /// Stored as big-endian `(target_epoch, validator_index)` to enable efficient iteration /// while pruning. +/// +/// The target epoch is stored in 2 bytes modulo the `history_length`. +/// +/// The validator index is stored in 5 bytes (validator registry limit is 2^40). #[derive(Debug)] pub struct AttesterKey { data: [u8; ATTESTER_KEY_SIZE], } impl AttesterKey { - pub fn new(validator_index: u64, target_epoch: Epoch) -> Self { + pub fn new(validator_index: u64, target_epoch: Epoch, config: &Config) -> Self { let mut data = [0; ATTESTER_KEY_SIZE]; - data[0..8].copy_from_slice(&target_epoch.as_u64().to_be_bytes()); - data[8..ATTESTER_KEY_SIZE].copy_from_slice(&validator_index.to_be_bytes()); - AttesterKey { data } - } - pub fn parse(data: &[u8]) -> Result<(Epoch, u64), Error> { - if data.len() == ATTESTER_KEY_SIZE { - let target_epoch = Epoch::new(BigEndian::read_u64(&data[..8])); - let validator_index = BigEndian::read_u64(&data[8..]); - Ok((target_epoch, validator_index)) - } else { - Err(Error::AttesterKeyCorrupt { length: data.len() }) - } + BigEndian::write_uint( + &mut data[..2], + target_epoch.as_u64() % config.history_length as u64, + 2, + ); + BigEndian::write_uint(&mut data[2..], validator_index, 5); + + AttesterKey { data } } } @@ -111,7 +126,7 @@ impl ProposerKey { ProposerKey { data } } - pub fn parse(data: &[u8]) -> Result<(Slot, u64), Error> { + pub fn parse(data: Cow<[u8]>) -> Result<(Slot, u64), Error> { if data.len() == PROPOSER_KEY_SIZE { let slot = Slot::new(BigEndian::read_u64(&data[..8])); let validator_index = BigEndian::read_u64(&data[8..]); @@ -148,93 +163,213 @@ impl AsRef<[u8]> for CurrentEpochKey { } /// Key containing an epoch and an indexed attestation hash. 
-pub struct IndexedAttestationKey { - target_and_root: [u8; INDEXED_ATTESTATION_KEY_SIZE], +pub struct IndexedAttestationIdKey { + target_and_root: [u8; INDEXED_ATTESTATION_ID_KEY_SIZE], } -impl IndexedAttestationKey { +impl IndexedAttestationIdKey { pub fn new(target_epoch: Epoch, indexed_attestation_root: Hash256) -> Self { - let mut data = [0; INDEXED_ATTESTATION_KEY_SIZE]; + let mut data = [0; INDEXED_ATTESTATION_ID_KEY_SIZE]; data[0..8].copy_from_slice(&target_epoch.as_u64().to_be_bytes()); - data[8..INDEXED_ATTESTATION_KEY_SIZE].copy_from_slice(indexed_attestation_root.as_bytes()); + data[8..INDEXED_ATTESTATION_ID_KEY_SIZE] + .copy_from_slice(indexed_attestation_root.as_bytes()); Self { target_and_root: data, } } - pub fn parse(data: &[u8]) -> Result<(Epoch, Hash256), Error> { - if data.len() == INDEXED_ATTESTATION_KEY_SIZE { + pub fn parse(data: Cow<[u8]>) -> Result<(Epoch, Hash256), Error> { + if data.len() == INDEXED_ATTESTATION_ID_KEY_SIZE { let target_epoch = Epoch::new(BigEndian::read_u64(&data[..8])); let indexed_attestation_root = Hash256::from_slice(&data[8..]); Ok((target_epoch, indexed_attestation_root)) } else { - Err(Error::IndexedAttestationKeyCorrupt { length: data.len() }) + Err(Error::IndexedAttestationIdKeyCorrupt { length: data.len() }) } } } -impl AsRef<[u8]> for IndexedAttestationKey { +impl AsRef<[u8]> for IndexedAttestationIdKey { fn as_ref(&self) -> &[u8] { &self.target_and_root } } +/// Key containing a 6-byte indexed attestation ID. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct IndexedAttestationId { + id: [u8; INDEXED_ATTESTATION_ID_SIZE], +} + +impl IndexedAttestationId { + pub fn new(id: u64) -> Self { + let mut data = [0; INDEXED_ATTESTATION_ID_SIZE]; + BigEndian::write_uint(&mut data, id, INDEXED_ATTESTATION_ID_SIZE); + Self { id: data } + } + + pub fn parse(data: Cow<[u8]>) -> Result { + if data.len() == INDEXED_ATTESTATION_ID_SIZE { + Ok(BigEndian::read_uint( + data.borrow(), + INDEXED_ATTESTATION_ID_SIZE, + )) + } else { + Err(Error::IndexedAttestationIdCorrupt { length: data.len() }) + } + } + + pub fn null() -> Self { + Self::new(0) + } + + pub fn is_null(&self) -> bool { + self.id == [0, 0, 0, 0, 0, 0] + } + + pub fn as_u64(&self) -> u64 { + BigEndian::read_uint(&self.id, INDEXED_ATTESTATION_ID_SIZE) + } +} + +impl AsRef<[u8]> for IndexedAttestationId { + fn as_ref(&self) -> &[u8] { + &self.id + } +} + +/// Bincode deserialization specialised to `Cow<[u8]>`. +fn bincode_deserialize(bytes: Cow<[u8]>) -> Result { + Ok(bincode::deserialize(bytes.borrow())?) +} + +fn ssz_decode(bytes: Cow<[u8]>) -> Result { + Ok(T::from_ssz_bytes(bytes.borrow())?) +} + impl SlasherDB { - pub fn open(config: Arc) -> Result { + pub fn open(config: Arc, log: Logger) -> Result { + // Delete any legacy LMDB database. 
+ Self::delete_legacy_file(&config.database_path, LEGACY_DB_FILENAME, &log)?; + Self::delete_legacy_file(&config.database_path, LEGACY_DB_LOCK_FILENAME, &log)?; + std::fs::create_dir_all(&config.database_path)?; + let env = Environment::new() - .set_max_dbs(LMDB_MAX_DBS) - .set_map_size(config.max_db_size_mbs * MEGABYTE) + .set_max_dbs(MAX_NUM_DBS) + .set_geometry(Self::geometry(&config)) .open_with_permissions(&config.database_path, 0o600)?; - let indexed_attestation_db = - env.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; - let attesters_db = env.create_db(Some(ATTESTERS_DB), Self::db_flags())?; - let min_targets_db = env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; - let max_targets_db = env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; - let current_epochs_db = env.create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?; - let proposers_db = env.create_db(Some(PROPOSERS_DB), Self::db_flags())?; - let metadata_db = env.create_db(Some(METADATA_DB), Self::db_flags())?; + + let txn = env.begin_rw_txn()?; + txn.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; + txn.create_db(Some(INDEXED_ATTESTATION_ID_DB), Self::db_flags())?; + txn.create_db(Some(ATTESTERS_DB), Self::db_flags())?; + txn.create_db(Some(ATTESTERS_MAX_TARGETS_DB), Self::db_flags())?; + txn.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; + txn.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; + txn.create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?; + txn.create_db(Some(PROPOSERS_DB), Self::db_flags())?; + txn.create_db(Some(METADATA_DB), Self::db_flags())?; + txn.commit()?; #[cfg(windows)] { use filesystem::restrict_file_permissions; - let data = config.database_path.join("data.mdb"); - let lock = config.database_path.join("lock.mdb"); + let data = config.database_path.join("mdbx.dat"); + let lock = config.database_path.join("mdbx.lck"); restrict_file_permissions(data).map_err(Error::DatabasePermissionsError)?; 
restrict_file_permissions(lock).map_err(Error::DatabasePermissionsError)?; } - let db = Self { + let attestation_root_cache = Mutex::new(LruCache::new(config.attestation_root_cache_size)); + + let mut db = Self { env, - indexed_attestation_db, - attesters_db, - min_targets_db, - max_targets_db, - current_epochs_db, - proposers_db, - metadata_db, + attestation_root_cache, config, _phantom: PhantomData, }; + db = db.migrate()?; + let mut txn = db.begin_rw_txn()?; - - db.migrate(&mut txn)?; - if let Some(on_disk_config) = db.load_config(&mut txn)? { - if !db.config.is_compatible(&on_disk_config) { + let current_disk_config = db.config.disk_config(); + if current_disk_config != on_disk_config { return Err(Error::ConfigIncompatible { on_disk_config, - config: (*db.config).clone(), + config: current_disk_config, }); } } - db.store_config(&db.config, &mut txn)?; txn.commit()?; Ok(db) } + fn delete_legacy_file(slasher_dir: &Path, filename: &str, log: &Logger) -> Result<(), Error> { + let path = slasher_dir.join(filename); + + if path.is_file() { + info!( + log, + "Deleting legacy slasher DB"; + "file" => ?path.display(), + ); + std::fs::remove_file(&path)?; + } + Ok(()) + } + + fn open_db<'a>(&self, txn: &'a RwTransaction<'a>, name: &str) -> Result, Error> { + Ok(txn.open_db(Some(name))?) 
+ } + + pub fn indexed_attestation_db<'a>( + &self, + txn: &'a RwTransaction<'a>, + ) -> Result, Error> { + self.open_db(txn, INDEXED_ATTESTATION_DB) + } + + pub fn indexed_attestation_id_db<'a>( + &self, + txn: &'a RwTransaction<'a>, + ) -> Result, Error> { + self.open_db(txn, INDEXED_ATTESTATION_ID_DB) + } + + pub fn attesters_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result, Error> { + self.open_db(txn, ATTESTERS_DB) + } + + pub fn attesters_max_targets_db<'a>( + &self, + txn: &'a RwTransaction<'a>, + ) -> Result, Error> { + self.open_db(txn, ATTESTERS_MAX_TARGETS_DB) + } + + pub fn min_targets_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result, Error> { + self.open_db(txn, MIN_TARGETS_DB) + } + + pub fn max_targets_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result, Error> { + self.open_db(txn, MAX_TARGETS_DB) + } + + pub fn current_epochs_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result, Error> { + self.open_db(txn, CURRENT_EPOCHS_DB) + } + + pub fn proposers_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result, Error> { + self.open_db(txn, PROPOSERS_DB) + } + + pub fn metadata_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result, Error> { + self.open_db(txn, METADATA_DB) + } + pub fn db_flags() -> DatabaseFlags { DatabaseFlags::default() } @@ -247,17 +382,24 @@ impl SlasherDB { Ok(self.env.begin_rw_txn()?) } + pub fn geometry(config: &Config) -> Geometry> { + Geometry { + size: Some(0..config.max_db_size_mbs * MEGABYTE), + growth_step: Some(MDBX_GROWTH_STEP), + shrink_threshold: None, + page_size: None, + } + } + pub fn load_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result, Error> { - Ok(txn - .get(self.metadata_db, &METADATA_VERSION_KEY) - .optional()? - .map(bincode::deserialize) - .transpose()?) + txn.get(&self.metadata_db(txn)?, METADATA_VERSION_KEY)? 
+ .map(bincode_deserialize) + .transpose() } pub fn store_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> { txn.put( - self.metadata_db, + &self.metadata_db(txn)?, &METADATA_VERSION_KEY, &bincode::serialize(&CURRENT_SCHEMA_VERSION)?, Self::write_flags(), @@ -269,20 +411,18 @@ impl SlasherDB { /// /// This is generic in order to allow loading of configs for different schema versions. /// Care should be taken to ensure it is only called for `Config`-like `T`. - pub fn load_config<'a, T: Deserialize<'a>>( + pub fn load_config( &self, - txn: &'a mut RwTransaction<'_>, + txn: &mut RwTransaction<'_>, ) -> Result, Error> { - Ok(txn - .get(self.metadata_db, &METADATA_CONFIG_KEY) - .optional()? - .map(bincode::deserialize) - .transpose()?) + txn.get(&self.metadata_db(txn)?, METADATA_CONFIG_KEY)? + .map(bincode_deserialize) + .transpose() } pub fn store_config(&self, config: &Config, txn: &mut RwTransaction<'_>) -> Result<(), Error> { txn.put( - self.metadata_db, + &self.metadata_db(txn)?, &METADATA_CONFIG_KEY, &bincode::serialize(config)?, Self::write_flags(), @@ -290,19 +430,70 @@ impl SlasherDB { Ok(()) } + pub fn get_attester_max_target( + &self, + validator_index: u64, + txn: &mut RwTransaction<'_>, + ) -> Result, Error> { + txn.get( + &self.attesters_max_targets_db(txn)?, + CurrentEpochKey::new(validator_index).as_ref(), + )? + .map(ssz_decode) + .transpose() + } + + pub fn update_attester_max_target( + &self, + validator_index: u64, + previous_max_target: Option, + max_target: Epoch, + txn: &mut RwTransaction<'_>, + ) -> Result<(), Error> { + // Don't update maximum if new target is less than or equal to previous. In the case of + // no previous we *do* want to update. + if previous_max_target.map_or(false, |prev_max| max_target <= prev_max) { + return Ok(()); + } + + // Zero out attester DB entries which are now older than the history length. 
+ // Avoid writing the whole array on initialization (`previous_max_target == None`), and + // avoid overwriting the entire attesters array more than once. + if let Some(previous_max_target) = previous_max_target { + let start_epoch = std::cmp::max( + previous_max_target.as_u64() + 1, + (max_target.as_u64() + 1).saturating_sub(self.config.history_length as u64), + ); + for target_epoch in (start_epoch..max_target.as_u64()).map(Epoch::new) { + txn.put( + &self.attesters_db(txn)?, + &AttesterKey::new(validator_index, target_epoch, &self.config), + &CompactAttesterRecord::null().as_bytes(), + Self::write_flags(), + )?; + } + } + + txn.put( + &self.attesters_max_targets_db(txn)?, + &CurrentEpochKey::new(validator_index), + &max_target.as_ssz_bytes(), + Self::write_flags(), + )?; + Ok(()) + } + pub fn get_current_epoch_for_validator( &self, validator_index: u64, txn: &mut RwTransaction<'_>, ) -> Result, Error> { - Ok(txn - .get( - self.current_epochs_db, - &CurrentEpochKey::new(validator_index), - ) - .optional()? - .map(Epoch::from_ssz_bytes) - .transpose()?) + txn.get( + &self.current_epochs_db(txn)?, + CurrentEpochKey::new(validator_index).as_ref(), + )? 
+ .map(ssz_decode) + .transpose() } pub fn update_current_epoch_for_validator( @@ -312,7 +503,7 @@ impl SlasherDB { txn: &mut RwTransaction<'_>, ) -> Result<(), Error> { txn.put( - self.current_epochs_db, + &self.current_epochs_db(txn)?, &CurrentEpochKey::new(validator_index), ¤t_epoch.as_ssz_bytes(), Self::write_flags(), @@ -320,41 +511,128 @@ impl SlasherDB { Ok(()) } - pub fn store_indexed_attestation( + fn get_indexed_attestation_id( &self, txn: &mut RwTransaction<'_>, - indexed_attestation_hash: Hash256, - indexed_attestation: &IndexedAttestation, - ) -> Result<(), Error> { - let key = IndexedAttestationKey::new( - indexed_attestation.data.target.epoch, - indexed_attestation_hash, - ); - let data = indexed_attestation.as_ssz_bytes(); + key: &IndexedAttestationIdKey, + ) -> Result, Error> { + txn.get(&self.indexed_attestation_id_db(txn)?, key.as_ref())? + .map(IndexedAttestationId::parse) + .transpose() + } + fn put_indexed_attestation_id( + &self, + txn: &mut RwTransaction<'_>, + key: &IndexedAttestationIdKey, + value: IndexedAttestationId, + ) -> Result<(), Error> { txn.put( - self.indexed_attestation_db, - &key, - &data, + &self.indexed_attestation_id_db(txn)?, + key, + &value, Self::write_flags(), )?; Ok(()) } + /// Store an indexed attestation and return its ID. + /// + /// If the attestation is already stored then the existing ID will be returned without a write. + pub fn store_indexed_attestation( + &self, + txn: &mut RwTransaction<'_>, + indexed_attestation_hash: Hash256, + indexed_attestation: &IndexedAttestation, + ) -> Result { + // Look-up ID by hash. + let id_key = IndexedAttestationIdKey::new( + indexed_attestation.data.target.epoch, + indexed_attestation_hash, + ); + + if let Some(indexed_att_id) = self.get_indexed_attestation_id(txn, &id_key)? { + return Ok(indexed_att_id); + } + + // Store the new indexed attestation at the end of the current table. 
+ let mut cursor = txn.cursor(&self.indexed_attestation_db(txn)?)?; + + let indexed_att_id = match cursor.last::<_, ()>()? { + // First ID is 1 so that 0 can be used to represent `null` in `CompactAttesterRecord`. + None => 1, + Some((key_bytes, _)) => IndexedAttestationId::parse(key_bytes)? + 1, + }; + + let attestation_key = IndexedAttestationId::new(indexed_att_id); + let data = indexed_attestation.as_ssz_bytes(); + + cursor.put(attestation_key.as_ref(), &data, Self::write_flags())?; + drop(cursor); + + // Update the (epoch, hash) to ID mapping. + self.put_indexed_attestation_id(txn, &id_key, attestation_key)?; + + Ok(indexed_att_id) + } + pub fn get_indexed_attestation( &self, txn: &mut RwTransaction<'_>, - target_epoch: Epoch, - indexed_attestation_hash: Hash256, + indexed_attestation_id: IndexedAttestationId, ) -> Result, Error> { - let key = IndexedAttestationKey::new(target_epoch, indexed_attestation_hash); let bytes = txn - .get(self.indexed_attestation_db, &key) - .optional()? + .get( + &self.indexed_attestation_db(txn)?, + indexed_attestation_id.as_ref(), + )? .ok_or(Error::MissingIndexedAttestation { - root: indexed_attestation_hash, + id: indexed_attestation_id.as_u64(), })?; - Ok(IndexedAttestation::from_ssz_bytes(bytes)?) + ssz_decode(bytes) + } + + fn get_attestation_data_root( + &self, + txn: &mut RwTransaction<'_>, + indexed_id: IndexedAttestationId, + ) -> Result<(Hash256, Option>), Error> { + metrics::inc_counter(&metrics::SLASHER_NUM_ATTESTATION_ROOT_QUERIES); + + // If the value already exists in the cache, return it. + let mut cache = self.attestation_root_cache.lock(); + if let Some(attestation_data_root) = cache.get(&indexed_id) { + metrics::inc_counter(&metrics::SLASHER_NUM_ATTESTATION_ROOT_HITS); + return Ok((*attestation_data_root, None)); + } + + // Otherwise, load the indexed attestation, compute the root and cache it. 
+ let indexed_attestation = self.get_indexed_attestation(txn, indexed_id)?; + let attestation_data_root = indexed_attestation.data.tree_hash_root(); + + cache.put(indexed_id, attestation_data_root); + + Ok((attestation_data_root, Some(indexed_attestation))) + } + + pub fn cache_attestation_data_root( + &self, + indexed_attestation_id: IndexedAttestationId, + attestation_data_root: Hash256, + ) { + let mut cache = self.attestation_root_cache.lock(); + cache.put(indexed_attestation_id, attestation_data_root); + } + + fn delete_attestation_data_roots(&self, ids: impl IntoIterator) { + let mut cache = self.attestation_root_cache.lock(); + for indexed_id in ids { + cache.pop(&indexed_id); + } + } + + pub fn attestation_root_cache_size(&self) -> usize { + self.attestation_root_cache.lock().len() } pub fn check_and_update_attester_record( @@ -362,41 +640,57 @@ impl SlasherDB { txn: &mut RwTransaction<'_>, validator_index: u64, attestation: &IndexedAttestation, - record: AttesterRecord, + record: &AttesterRecord, + indexed_attestation_id: IndexedAttestationId, ) -> Result, Error> { // See if there's an existing attestation for this attester. let target_epoch = attestation.data.target.epoch; + + let prev_max_target = self.get_attester_max_target(validator_index, txn)?; + if let Some(existing_record) = - self.get_attester_record(txn, validator_index, target_epoch)? + self.get_attester_record(txn, validator_index, target_epoch, prev_max_target)? { - // If the existing attestation data is identical, then this attestation is not + // If the existing indexed attestation is identical, then this attestation is not // slashable and no update is required. - if existing_record.attestation_data_hash == record.attestation_data_hash { + let existing_att_id = existing_record.indexed_attestation_id; + if existing_att_id == indexed_attestation_id { return Ok(AttesterSlashingStatus::NotSlashable); } - // Otherwise, load the indexed attestation so we can confirm that it's slashable. 
- let existing_attestation = self.get_indexed_attestation( - txn, - target_epoch, - existing_record.indexed_attestation_hash, - )?; + // Otherwise, load the attestation data root and check slashability via a hash root + // comparison. + let (existing_data_root, opt_existing_att) = + self.get_attestation_data_root(txn, existing_att_id)?; + + if existing_data_root == record.attestation_data_hash { + return Ok(AttesterSlashingStatus::NotSlashable); + } + + // If we made it this far, then the attestation is slashable. Ensure that it's + // loaded, double-check the slashing condition and return. + let existing_attestation = opt_existing_att + .map_or_else(|| self.get_indexed_attestation(txn, existing_att_id), Ok)?; + if attestation.is_double_vote(&existing_attestation) { Ok(AttesterSlashingStatus::DoubleVote(Box::new( existing_attestation, ))) } else { - Err(Error::AttesterRecordInconsistentRoot) + Err(Error::InconsistentAttestationDataRoot) } } // If no attestation exists, insert a record for this validator. else { + self.update_attester_max_target(validator_index, prev_max_target, target_epoch, txn)?; + txn.put( - self.attesters_db, - &AttesterKey::new(validator_index, target_epoch), - &record.as_ssz_bytes(), + &self.attesters_db(txn)?, + &AttesterKey::new(validator_index, target_epoch, &self.config), + &indexed_attestation_id, Self::write_flags(), )?; + Ok(AttesterSlashingStatus::NotSlashable) } } @@ -407,13 +701,15 @@ impl SlasherDB { validator_index: u64, target_epoch: Epoch, ) -> Result, Error> { + let max_target = self.get_attester_max_target(validator_index, txn)?; + let record = self - .get_attester_record(txn, validator_index, target_epoch)? + .get_attester_record(txn, validator_index, target_epoch, max_target)? 
.ok_or(Error::MissingAttesterRecord { validator_index, target_epoch, })?; - self.get_indexed_attestation(txn, target_epoch, record.indexed_attestation_hash) + self.get_indexed_attestation(txn, record.indexed_attestation_id) } pub fn get_attester_record( @@ -421,13 +717,18 @@ impl SlasherDB { txn: &mut RwTransaction<'_>, validator_index: u64, target: Epoch, - ) -> Result, Error> { - let attester_key = AttesterKey::new(validator_index, target); + prev_max_target: Option, + ) -> Result, Error> { + if prev_max_target.map_or(true, |prev_max| target > prev_max) { + return Ok(None); + } + + let attester_key = AttesterKey::new(validator_index, target, &self.config); Ok(txn - .get(self.attesters_db, &attester_key) - .optional()? - .map(AttesterRecord::from_ssz_bytes) - .transpose()?) + .get(&self.attesters_db(txn)?, attester_key.as_ref())? + .map(CompactAttesterRecord::parse) + .transpose()? + .filter(|record| !record.is_null())) } pub fn get_block_proposal( @@ -437,11 +738,9 @@ impl SlasherDB { slot: Slot, ) -> Result, Error> { let proposer_key = ProposerKey::new(proposer_index, slot); - Ok(txn - .get(self.proposers_db, &proposer_key) - .optional()? - .map(SignedBeaconBlockHeader::from_ssz_bytes) - .transpose()?) + txn.get(&self.proposers_db(txn)?, proposer_key.as_ref())? 
+ .map(ssz_decode) + .transpose() } pub fn check_or_insert_block_proposal( @@ -465,7 +764,7 @@ impl SlasherDB { } } else { txn.put( - self.proposers_db, + &self.proposers_db(txn)?, &ProposerKey::new(proposer_index, slot), &block_header.as_ssz_bytes(), Self::write_flags(), @@ -491,7 +790,6 @@ impl SlasherDB { txn: &mut RwTransaction<'_>, ) -> Result<(), Error> { self.prune_proposers(current_epoch, txn)?; - self.prune_attesters(current_epoch, txn)?; self.prune_indexed_attestations(current_epoch, txn)?; Ok(()) } @@ -506,80 +804,22 @@ impl SlasherDB { .saturating_sub(self.config.history_length) .start_slot(E::slots_per_epoch()); - let mut cursor = txn.open_rw_cursor(self.proposers_db)?; + let mut cursor = txn.cursor(&self.proposers_db(txn)?)?; // Position cursor at first key, bailing out if the database is empty. - if cursor - .get(None, None, lmdb_sys::MDB_FIRST) - .optional()? - .is_none() - { + if cursor.first::<(), ()>()?.is_none() { return Ok(()); } loop { - let key_bytes = cursor - .get(None, None, lmdb_sys::MDB_GET_CURRENT)? - .0 - .ok_or(Error::MissingProposerKey)?; + let (key_bytes, ()) = cursor.get_current()?.ok_or(Error::MissingProposerKey)?; let (slot, _) = ProposerKey::parse(key_bytes)?; if slot < min_slot { cursor.del(Self::write_flags())?; // End the loop if there is no next entry. - if cursor - .get(None, None, lmdb_sys::MDB_NEXT) - .optional()? - .is_none() - { - break; - } - } else { - break; - } - } - - Ok(()) - } - - fn prune_attesters( - &self, - current_epoch: Epoch, - txn: &mut RwTransaction<'_>, - ) -> Result<(), Error> { - let min_epoch = current_epoch - .saturating_add(1u64) - .saturating_sub(self.config.history_length as u64); - - let mut cursor = txn.open_rw_cursor(self.attesters_db)?; - - // Position cursor at first key, bailing out if the database is empty. - if cursor - .get(None, None, lmdb_sys::MDB_FIRST) - .optional()? 
- .is_none() - { - return Ok(()); - } - - loop { - let key_bytes = cursor - .get(None, None, lmdb_sys::MDB_GET_CURRENT)? - .0 - .ok_or(Error::MissingAttesterKey)?; - - let (target_epoch, _) = AttesterKey::parse(key_bytes)?; - - if target_epoch < min_epoch { - cursor.del(Self::write_flags())?; - - // End the loop if there is no next entry. - if cursor - .get(None, None, lmdb_sys::MDB_NEXT) - .optional()? - .is_none() - { + if cursor.next::<(), ()>()?.is_none() { break; } } else { @@ -599,39 +839,46 @@ impl SlasherDB { .saturating_add(1u64) .saturating_sub(self.config.history_length as u64); - let mut cursor = txn.open_rw_cursor(self.indexed_attestation_db)?; + // Collect indexed attestation IDs to delete. + let mut indexed_attestation_ids = vec![]; + + let mut cursor = txn.cursor(&self.indexed_attestation_id_db(txn)?)?; // Position cursor at first key, bailing out if the database is empty. - if cursor - .get(None, None, lmdb_sys::MDB_FIRST) - .optional()? - .is_none() - { + if cursor.first::<(), ()>()?.is_none() { return Ok(()); } loop { - let key_bytes = cursor - .get(None, None, lmdb_sys::MDB_GET_CURRENT)? - .0 - .ok_or(Error::MissingAttesterKey)?; + let (key_bytes, value) = cursor + .get_current()? + .ok_or(Error::MissingIndexedAttestationIdKey)?; - let (target_epoch, _) = IndexedAttestationKey::parse(key_bytes)?; + let (target_epoch, _) = IndexedAttestationIdKey::parse(key_bytes)?; if target_epoch < min_epoch { + indexed_attestation_ids.push(IndexedAttestationId::new( + IndexedAttestationId::parse(value)?, + )); + cursor.del(Self::write_flags())?; - if cursor - .get(None, None, lmdb_sys::MDB_NEXT) - .optional()? - .is_none() - { + if cursor.next::<(), ()>()?.is_none() { break; } } else { break; } } + drop(cursor); + + // Delete the indexed attestations. + // Optimisation potential: use a cursor here. 
+ let indexed_attestation_db = self.indexed_attestation_db(txn)?; + for indexed_attestation_id in &indexed_attestation_ids { + txn.del(&indexed_attestation_db, indexed_attestation_id, None)?; + } + self.delete_attestation_data_roots(indexed_attestation_ids); Ok(()) } diff --git a/slasher/src/error.rs b/slasher/src/error.rs index d40a54f714..7e689022e4 100644 --- a/slasher/src/error.rs +++ b/slasher/src/error.rs @@ -1,10 +1,10 @@ -use crate::Config; +use crate::config::{Config, DiskConfig}; use std::io; -use types::{Epoch, Hash256}; +use types::Epoch; #[derive(Debug)] pub enum Error { - DatabaseError(lmdb::Error), + DatabaseError(mdbx::Error), DatabaseIOError(io::Error), DatabasePermissionsError(filesystem::Error), SszDecodeError(ssz::DecodeError), @@ -19,12 +19,16 @@ pub enum Error { chunk_size: usize, history_length: usize, }, + ConfigInvalidHistoryLength { + history_length: usize, + max_history_length: usize, + }, ConfigInvalidZeroParameter { config: Config, }, ConfigIncompatible { - on_disk_config: Config, - config: Config, + on_disk_config: DiskConfig, + config: DiskConfig, }, ConfigMissing, DistanceTooLarge, @@ -43,22 +47,26 @@ pub enum Error { ProposerKeyCorrupt { length: usize, }, - IndexedAttestationKeyCorrupt { + IndexedAttestationIdKeyCorrupt { + length: usize, + }, + IndexedAttestationIdCorrupt { length: usize, }, MissingIndexedAttestation { - root: Hash256, + id: u64, }, MissingAttesterKey, MissingProposerKey, - MissingIndexedAttestationKey, - AttesterRecordInconsistentRoot, + MissingIndexedAttestationId, + MissingIndexedAttestationIdKey, + InconsistentAttestationDataRoot, } -impl From for Error { - fn from(e: lmdb::Error) -> Self { +impl From for Error { + fn from(e: mdbx::Error) -> Self { match e { - lmdb::Error::Other(os_error) => Error::from(io::Error::from_raw_os_error(os_error)), + mdbx::Error::Other(os_error) => Error::from(io::Error::from_raw_os_error(os_error)), _ => Error::DatabaseError(e), } } diff --git a/slasher/src/lib.rs 
b/slasher/src/lib.rs index 10427ba2f0..184e3080e5 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -16,14 +16,18 @@ mod utils; pub use crate::slasher::Slasher; pub use attestation_queue::{AttestationBatch, AttestationQueue, SimpleBatch}; -pub use attester_record::{AttesterRecord, IndexedAttesterRecord}; +pub use attester_record::{AttesterRecord, CompactAttesterRecord, IndexedAttesterRecord}; pub use block_queue::BlockQueue; pub use config::Config; -pub use database::SlasherDB; +pub use database::{IndexedAttestationId, SlasherDB}; pub use error::Error; use types::{AttesterSlashing, EthSpec, IndexedAttestation, ProposerSlashing}; +/// LMDB-to-MDBX compatibility shims. +pub type Environment = mdbx::Environment; +pub type RwTransaction<'env> = mdbx::Transaction<'env, mdbx::RW, mdbx::NoWriteMap>; + #[derive(Debug, PartialEq)] pub enum AttesterSlashingStatus { NotSlashable, diff --git a/slasher/src/metrics.rs b/slasher/src/metrics.rs index 6b21fb013a..b11d21d4b5 100644 --- a/slasher/src/metrics.rs +++ b/slasher/src/metrics.rs @@ -4,7 +4,7 @@ pub use lighthouse_metrics::*; lazy_static! { pub static ref SLASHER_DATABASE_SIZE: Result = try_create_int_gauge( "slasher_database_size", - "Size of the LMDB database backing the slasher, in bytes" + "Size of the database backing the slasher, in bytes" ); pub static ref SLASHER_RUN_TIME: Result = try_create_histogram( "slasher_process_batch_time", @@ -40,4 +40,17 @@ lazy_static! 
{ "slasher_compression_ratio", "Compression ratio for min-max array chunks (higher is better)" ); + pub static ref SLASHER_NUM_ATTESTATION_ROOT_QUERIES: Result = + try_create_int_counter( + "slasher_num_attestation_root_queries", + "Number of requests for an attestation data root", + ); + pub static ref SLASHER_NUM_ATTESTATION_ROOT_HITS: Result = try_create_int_counter( + "slasher_num_attestation_root_hits", + "Number of requests for an attestation data root that hit the LRU cache", + ); + pub static ref SLASHER_ATTESTATION_ROOT_CACHE_SIZE: Result = try_create_int_gauge( + "slasher_attestation_root_cache_size", + "Number of attestation data roots cached in memory" + ); } diff --git a/slasher/src/migrate.rs b/slasher/src/migrate.rs index 020c7aaf9a..674ab9c132 100644 --- a/slasher/src/migrate.rs +++ b/slasher/src/migrate.rs @@ -1,79 +1,29 @@ -use crate::{ - config::{DEFAULT_BROADCAST, DEFAULT_SLOT_OFFSET}, - database::CURRENT_SCHEMA_VERSION, - Config, Error, SlasherDB, -}; -use lmdb::RwTransaction; -use serde_derive::{Deserialize, Serialize}; -use std::path::PathBuf; +use crate::{database::CURRENT_SCHEMA_VERSION, Error, SlasherDB}; use types::EthSpec; -/// Config from schema version 1, for migration to version 2+. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ConfigV1 { - database_path: PathBuf, - chunk_size: usize, - validator_chunk_size: usize, - history_length: usize, - update_period: u64, - max_db_size_mbs: usize, -} - -type ConfigV2 = Config; - -impl Into for ConfigV1 { - fn into(self) -> ConfigV2 { - Config { - database_path: self.database_path, - chunk_size: self.chunk_size, - validator_chunk_size: self.validator_chunk_size, - history_length: self.history_length, - update_period: self.update_period, - slot_offset: DEFAULT_SLOT_OFFSET, - max_db_size_mbs: self.max_db_size_mbs, - broadcast: DEFAULT_BROADCAST, - } - } -} - impl SlasherDB { /// If the database exists, and has a schema, attempt to migrate it to the current version. 
- pub fn migrate(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> { - if let Some(schema_version) = self.load_schema_version(txn)? { - match (schema_version, CURRENT_SCHEMA_VERSION) { - // The migration from v1 to v2 is a bit messy because v1.0.5 silently - // changed the schema to v2, so a v1 schema could have either a v1 or v2 - // config. - (1, 2) => { - match self.load_config::(txn) { - Ok(Some(config_v1)) => { - // Upgrade to v2 config and store on disk. - let config_v2 = config_v1.into(); - self.store_config(&config_v2, txn)?; - } - Ok(None) => { - // Impossible to have schema version and no config. - return Err(Error::ConfigMissing); - } - Err(_) => { - // If loading v1 config failed, ensure loading v2 config succeeds. - // No further action is needed. - let _config_v2 = self.load_config::(txn)?; - } - } - } - (x, y) if x == y => {} - (_, _) => { - return Err(Error::IncompatibleSchemaVersion { - database_schema_version: schema_version, - software_schema_version: CURRENT_SCHEMA_VERSION, - }); - } - } - } + pub fn migrate(self) -> Result { + let mut txn = self.begin_rw_txn()?; + let schema_version = self.load_schema_version(&mut txn)?; + drop(txn); - // If the migration succeeded, update the schema version on-disk. - self.store_schema_version(txn)?; - Ok(()) + if let Some(schema_version) = schema_version { + match (schema_version, CURRENT_SCHEMA_VERSION) { + // Schema v3 changed the underlying database from LMDB to MDBX. Unless the user did + // some manual hacking it should be impossible to read an MDBX schema version < 3. 
+ (from, _) if from < 3 => Err(Error::IncompatibleSchemaVersion { + database_schema_version: schema_version, + software_schema_version: CURRENT_SCHEMA_VERSION, + }), + (x, y) if x == y => Ok(self), + (_, _) => Err(Error::IncompatibleSchemaVersion { + database_schema_version: schema_version, + software_schema_version: CURRENT_SCHEMA_VERSION, + }), + } + } else { + Ok(self) + } } } diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs index 122ed439a4..066c8d63d9 100644 --- a/slasher/src/slasher.rs +++ b/slasher/src/slasher.rs @@ -6,9 +6,8 @@ use crate::metrics::{ }; use crate::{ array, AttestationBatch, AttestationQueue, AttesterRecord, BlockQueue, Config, Error, - ProposerSlashingStatus, SimpleBatch, SlasherDB, + IndexedAttestationId, ProposerSlashingStatus, RwTransaction, SimpleBatch, SlasherDB, }; -use lmdb::{RwTransaction, Transaction}; use parking_lot::Mutex; use slog::{debug, error, info, Logger}; use std::collections::HashSet; @@ -32,7 +31,7 @@ impl Slasher { pub fn open(config: Config, log: Logger) -> Result { config.validate()?; let config = Arc::new(config); - let db = SlasherDB::open(config.clone())?; + let db = SlasherDB::open(config.clone(), log.clone())?; let attester_slashings = Mutex::new(HashSet::new()); let proposer_slashings = Mutex::new(HashSet::new()); let attestation_queue = AttestationQueue::default(); @@ -159,11 +158,19 @@ impl Slasher { let mut num_stored = 0; for weak_record in &batch.attestations { if let Some(indexed_record) = weak_record.upgrade() { - self.db.store_indexed_attestation( + let indexed_attestation_id = self.db.store_indexed_attestation( txn, indexed_record.record.indexed_attestation_hash, &indexed_record.indexed, )?; + indexed_record.set_id(indexed_attestation_id); + + // Prime the attestation data root LRU cache. 
+ self.db.cache_attestation_data_root( + IndexedAttestationId::new(indexed_attestation_id), + indexed_record.record.attestation_data_hash, + ); + num_stored += 1; } } @@ -184,6 +191,12 @@ impl Slasher { for (subqueue_id, subqueue) in grouped_attestations.subqueues.into_iter().enumerate() { self.process_batch(txn, subqueue_id, subqueue, current_epoch)?; } + + metrics::set_gauge( + &metrics::SLASHER_ATTESTATION_ROOT_CACHE_SIZE, + self.db.attestation_root_cache_size() as i64, + ); + Ok(AttestationStats { num_processed }) } @@ -197,11 +210,13 @@ impl Slasher { ) -> Result<(), Error> { // First, check for double votes. for attestation in &batch { + let indexed_attestation_id = IndexedAttestationId::new(attestation.get_id()); match self.check_double_votes( txn, subqueue_id, &attestation.indexed, - attestation.record, + &attestation.record, + indexed_attestation_id, ) { Ok(slashings) => { if !slashings.is_empty() { @@ -262,7 +277,8 @@ impl Slasher { txn: &mut RwTransaction<'_>, subqueue_id: usize, attestation: &IndexedAttestation, - attester_record: AttesterRecord, + attester_record: &AttesterRecord, + indexed_attestation_id: IndexedAttestationId, ) -> Result>, Error> { let mut slashings = HashSet::new(); @@ -275,6 +291,7 @@ impl Slasher { validator_index, attestation, attester_record, + indexed_attestation_id, )?; if let Some(slashing) = slashing_status.into_slashing(attestation) { diff --git a/slasher/src/utils.rs b/slasher/src/utils.rs index 9c9eceaa14..ccd31e74e2 100644 --- a/slasher/src/utils.rs +++ b/slasher/src/utils.rs @@ -1,20 +1,5 @@ use crate::Error; -/// Mix-in trait for loading values from LMDB that may or may not exist. 
-pub trait TxnOptional { - fn optional(self) -> Result, E>; -} - -impl TxnOptional for Result { - fn optional(self) -> Result, Error> { - match self { - Ok(x) => Ok(Some(x)), - Err(lmdb::Error::NotFound) => Ok(None), - Err(e) => Err(e.into()), - } - } -} - /// Transform a transaction that would fail with a `MapFull` error into an optional result. pub trait TxnMapFull { fn allow_map_full(self) -> Result, E>; @@ -24,7 +9,7 @@ impl TxnMapFull for Result { fn allow_map_full(self) -> Result, Error> { match self { Ok(x) => Ok(Some(x)), - Err(Error::DatabaseError(lmdb::Error::MapFull)) => Ok(None), + Err(Error::DatabaseError(mdbx::Error::MapFull)) => Ok(None), Err(e) => Err(e), } } diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index 987853077a..a2abbc55b1 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -171,7 +171,7 @@ fn slasher_test( should_process_after: impl Fn(usize) -> bool, ) { let tempdir = tempdir().unwrap(); - let config = Config::new(tempdir.path().into()).for_testing(); + let config = Config::new(tempdir.path().into()); let slasher = Slasher::open(config, test_logger()).unwrap(); let current_epoch = Epoch::new(current_epoch); @@ -200,7 +200,7 @@ fn parallel_slasher_test( current_epoch: u64, ) { let tempdir = tempdir().unwrap(); - let config = Config::new(tempdir.path().into()).for_testing(); + let config = Config::new(tempdir.path().into()); let slasher = Slasher::open(config, test_logger()).unwrap(); let current_epoch = Epoch::new(current_epoch); diff --git a/slasher/tests/proposer_slashings.rs b/slasher/tests/proposer_slashings.rs index 13a9422fed..e8b052e664 100644 --- a/slasher/tests/proposer_slashings.rs +++ b/slasher/tests/proposer_slashings.rs @@ -9,7 +9,7 @@ use types::{Epoch, EthSpec}; #[test] fn empty_pruning() { let tempdir = tempdir().unwrap(); - let config = Config::new(tempdir.path().into()).for_testing(); + let config = Config::new(tempdir.path().into()); let 
slasher = Slasher::::open(config, test_logger()).unwrap(); slasher.prune_database(Epoch::new(0)).unwrap(); } @@ -19,7 +19,7 @@ fn block_pruning() { let slots_per_epoch = E::slots_per_epoch(); let tempdir = tempdir().unwrap(); - let mut config = Config::new(tempdir.path().into()).for_testing(); + let mut config = Config::new(tempdir.path().into()); config.chunk_size = 2; config.history_length = 2; diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 22ae26d135..7ff7fe5850 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -41,7 +41,7 @@ fn random_test(seed: u64, test_config: TestConfig) { let tempdir = tempdir().unwrap(); - let mut config = Config::new(tempdir.path().into()).for_testing(); + let mut config = Config::new(tempdir.path().into()); config.validator_chunk_size = 1 << rng.gen_range(1, 4); let chunk_size_exponent = rng.gen_range(1, 4); diff --git a/slasher/tests/wrap_around.rs b/slasher/tests/wrap_around.rs index 47054ebc66..b256840ee5 100644 --- a/slasher/tests/wrap_around.rs +++ b/slasher/tests/wrap_around.rs @@ -1,12 +1,12 @@ use logging::test_logger; -use slasher::{test_utils::indexed_att, Config, Error, Slasher}; +use slasher::{test_utils::indexed_att, Config, Slasher}; use tempfile::tempdir; use types::Epoch; #[test] fn attestation_pruning_empty_wrap_around() { let tempdir = tempdir().unwrap(); - let mut config = Config::new(tempdir.path().into()).for_testing(); + let mut config = Config::new(tempdir.path().into()); config.validator_chunk_size = 1; config.chunk_size = 16; config.history_length = 16; @@ -35,53 +35,3 @@ fn attestation_pruning_empty_wrap_around() { )); slasher.process_queued(current_epoch).unwrap(); } - -// Test that pruning can recover from a `MapFull` error -#[test] -fn pruning_with_map_full() { - let tempdir = tempdir().unwrap(); - let mut config = Config::new(tempdir.path().into()).for_testing(); - config.validator_chunk_size = 1; - config.chunk_size = 16; - config.history_length = 1024; - 
config.max_db_size_mbs = 1; - - let slasher = Slasher::open(config, test_logger()).unwrap(); - - let v = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; - - let mut current_epoch = Epoch::new(0); - - loop { - slasher.accept_attestation(indexed_att( - v.clone(), - (current_epoch - 1).as_u64(), - current_epoch.as_u64(), - 0, - )); - if let Err(Error::DatabaseError(lmdb::Error::MapFull)) = - slasher.process_queued(current_epoch) - { - break; - } - current_epoch += 1; - } - - loop { - slasher.prune_database(current_epoch).unwrap(); - - slasher.accept_attestation(indexed_att( - v.clone(), - (current_epoch - 1).as_u64(), - current_epoch.as_u64(), - 0, - )); - match slasher.process_queued(current_epoch) { - Ok(_) => break, - Err(Error::DatabaseError(lmdb::Error::MapFull)) => { - current_epoch += 1; - } - Err(e) => panic!("{:?}", e), - } - } -} diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar new file mode 100644 index 0000000000..d9084af348 --- /dev/null +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -0,0 +1,26 @@ +FROM rust:1.56.1-bullseye AS builder +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +COPY . 
lighthouse + +# build lighthouse directly with a cargo build command, bypassing the makefile +RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse + +# build lcli binary directly with cargo install command, bypassing the makefile +RUN cargo install --path /lighthouse/lcli --force --locked + +FROM ubuntu:latest +RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ + libssl-dev \ + ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# create and move the libvoidstar file +RUN mkdir libvoidstar +COPY --from=builder /lighthouse/testing/antithesis/libvoidstar/libvoidstar.so /usr/lib/libvoidstar.so + +# set the env variable to avoid having to always set it +ENV LD_LIBRARY_PATH=/usr/lib +# move the lighthouse binary and lcli binary +COPY --from=builder /lighthouse/target/x86_64-unknown-linux-gnu/release/lighthouse /usr/local/bin/lighthouse +COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli diff --git a/testing/antithesis/libvoidstar/libvoidstar.so b/testing/antithesis/libvoidstar/libvoidstar.so new file mode 100644 index 0000000000..0f8a0f23c3 Binary files /dev/null and b/testing/antithesis/libvoidstar/libvoidstar.so differ diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index e1668a9b49..6819674664 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -22,9 +22,9 @@ serde = "1.0.116" serde_derive = "1.0.116" serde_repr = "0.1.6" serde_yaml = "0.8.13" -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" -tree_hash = "0.4.0" +tree_hash = "0.4.1" tree_hash_derive = 
"0.4.0" cached_tree_hash = { path = "../../consensus/cached_tree_hash" } state_processing = { path = "../../consensus/state_processing" } @@ -34,3 +34,4 @@ snap = "1.0.1" fs2 = "0.4.3" beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } +fork_choice = { path = "../../consensus/fork_choice" } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 5a4385fd1a..3cd6d17c0c 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.1.3 +TESTS_TAG := v1.1.8 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index a7149c1a59..2eb4ce5407 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -7,6 +7,7 @@ # The ultimate goal is to detect any accidentally-missed spec tests. import os +import re import sys # First argument should the path to a file which contains a list of accessed file names. @@ -16,35 +17,28 @@ accessed_files_filename = sys.argv[1] tests_dir_filename = sys.argv[2] # If any of the file names found in the consensus-spec-tests directory *starts with* one of the -# following strings, we will assume they are to be ignored (i.e., we are purposefully *not* running -# the spec tests). +# following regular expressions, we will assume they are to be ignored (i.e., we are purposefully +# *not* running the spec tests). 
excluded_paths = [ - # Merge tests - "tests/minimal/merge", - "tests/mainnet/merge", - # Eth1Block + # Eth1Block and PowBlock # # Intentionally omitted, as per https://github.com/sigp/lighthouse/issues/1835 - "tests/minimal/phase0/ssz_static/Eth1Block/", - "tests/mainnet/phase0/ssz_static/Eth1Block/", - "tests/minimal/altair/ssz_static/Eth1Block/", - "tests/mainnet/altair/ssz_static/Eth1Block/", + "tests/.*/.*/ssz_static/Eth1Block/", + "tests/.*/.*/ssz_static/PowBlock/", # LightClientStore - "tests/minimal/altair/ssz_static/LightClientStore", - "tests/mainnet/altair/ssz_static/LightClientStore", + "tests/.*/.*/ssz_static/LightClientStore", # LightClientUpdate - "tests/minimal/altair/ssz_static/LightClientUpdate", - "tests/mainnet/altair/ssz_static/LightClientUpdate", + "tests/.*/.*/ssz_static/LightClientUpdate", # LightClientSnapshot - "tests/minimal/altair/ssz_static/LightClientSnapshot", - "tests/mainnet/altair/ssz_static/LightClientSnapshot", + "tests/.*/.*/ssz_static/LightClientSnapshot", # Merkle-proof tests for light clients - "tests/mainnet/altair/merkle/single_proof/pyspec_tests/", - "tests/minimal/altair/merkle/single_proof/pyspec_tests/" + "tests/.*/.*/merkle/single_proof", + # One of the EF researchers likes to pack the tarballs on a Mac + ".*\.DS_Store.*" ] def normalize_path(path): - return path.split("consensus-spec-tests/", )[1] + return path.split("consensus-spec-tests/")[1] # Determine the list of filenames which were accessed during tests. passed = set() @@ -59,21 +53,21 @@ excluded_files = 0 # Iterate all files in the tests directory, ensure that all files were either accessed # or intentionally missed. 
for root, dirs, files in os.walk(tests_dir_filename): - for name in files: - name = normalize_path(os.path.join(root, name)) - if name not in passed: - excluded = False - for excluded_path in excluded_paths: - if name.startswith(excluded_path): - excluded = True - break - if excluded: - excluded_files += 1 - else: - print(name) - missed.add(name) - else: - accessed_files += 1 + for name in files: + name = normalize_path(os.path.join(root, name)) + if name not in passed: + excluded = False + for excluded_path_regex in excluded_paths: + if re.match(excluded_path_regex, name): + excluded = True + break + if excluded: + excluded_files += 1 + else: + print(name) + missed.add(name) + else: + accessed_files += 1 # Exit with an error if there were any files missed. assert len(missed) == 0, "{} missed files".format(len(missed)) diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index e290421762..ac9ca8993c 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -26,6 +26,7 @@ mod ssz_generic; mod ssz_static; mod transition; +pub use self::fork_choice::*; pub use bls_aggregate_sigs::*; pub use bls_aggregate_verify::*; pub use bls_eth_aggregate_pubkeys::*; @@ -36,7 +37,6 @@ pub use bls_verify_msg::*; pub use common::SszStaticType; pub use epoch_processing::*; pub use fork::ForkTest; -pub use fork_choice::*; pub use genesis_initialization::*; pub use genesis_validity::*; pub use operations::*; diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index 175ad113b6..ade8711cdc 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -77,5 +77,6 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { match fork_name { ForkName::Base => ForkName::Base, ForkName::Altair => ForkName::Base, + ForkName::Merge => ForkName::Altair, // TODO: Check this when tests are released.. 
} } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 56e6c9b7bc..b187d46fed 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -94,10 +94,12 @@ impl EpochTransition for JustificationAndFinalization { spec, ) } - BeaconState::Altair(_) => altair::process_justification_and_finalization( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - ), + BeaconState::Altair(_) | BeaconState::Merge(_) => { + altair::process_justification_and_finalization( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + ) + } } } } @@ -110,11 +112,13 @@ impl EpochTransition for RewardsAndPenalties { validator_statuses.process_attestations(state)?; base::process_rewards_and_penalties(state, &mut validator_statuses, spec) } - BeaconState::Altair(_) => altair::process_rewards_and_penalties( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - spec, - ), + BeaconState::Altair(_) | BeaconState::Merge(_) => { + altair::process_rewards_and_penalties( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + spec, + ) + } } } } @@ -134,17 +138,15 @@ impl EpochTransition for Slashings { process_slashings( state, validator_statuses.total_balances.current_epoch(), - spec.proportional_slashing_multiplier, spec, )?; } - BeaconState::Altair(_) => { + BeaconState::Altair(_) | BeaconState::Merge(_) => { process_slashings( state, altair::ParticipationCache::new(state, spec) .unwrap() .current_epoch_total_active_balance(), - spec.proportional_slashing_multiplier_altair, spec, )?; } @@ -197,7 +199,9 @@ impl EpochTransition for SyncCommitteeUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) => altair::process_sync_committee_updates(state, spec), + BeaconState::Altair(_) | BeaconState::Merge(_) => { + 
altair::process_sync_committee_updates(state, spec) + } } } } @@ -206,7 +210,7 @@ impl EpochTransition for InactivityUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) => altair::process_inactivity_updates( + BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_inactivity_updates( state, &altair::ParticipationCache::new(state, spec).unwrap(), spec, @@ -219,7 +223,9 @@ impl EpochTransition for ParticipationFlagUpdates { fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { match state { BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) => altair::process_participation_flag_updates(state), + BeaconState::Altair(_) | BeaconState::Merge(_) => { + altair::process_participation_flag_updates(state) + } } } } @@ -267,7 +273,7 @@ impl> Case for EpochProcessing { && T::name() != "inactivity_updates" && T::name() != "participation_flag_updates" } - ForkName::Altair => true, + ForkName::Altair | ForkName::Merge => true, // TODO: revisit when tests are out } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index f3591bee72..ae12447abf 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -3,7 +3,7 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::cases::common::previous_fork; use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; -use state_processing::upgrade::upgrade_to_altair; +use state_processing::upgrade::{upgrade_to_altair, upgrade_to_bellatrix}; use types::{BeaconState, ForkName}; #[derive(Debug, Clone, Default, Deserialize)] @@ -58,8 +58,9 @@ impl Case for ForkTest { let spec = &E::default_spec(); let mut result = match fork_name { + ForkName::Base => panic!("phase0 not supported"), ForkName::Altair => upgrade_to_altair(&mut result_state, spec).map(|_| 
result_state), - _ => panic!("unknown fork: {:?}", fork_name), + ForkName::Merge => upgrade_to_bellatrix(&mut result_state, spec).map(|_| result_state), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 7d7b21da13..608429a9cb 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -1,5 +1,7 @@ use super::*; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; +use ::fork_choice::PayloadVerificationStatus; +use beacon_chain::slot_clock::SlotClock; use beacon_chain::{ attestation_verification::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, @@ -8,13 +10,22 @@ use beacon_chain::{ BeaconChainTypes, HeadInfo, }; use serde_derive::Deserialize; +use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; use std::time::Duration; use types::{ Attestation, BeaconBlock, BeaconState, Checkpoint, Epoch, EthSpec, ForkName, Hash256, - IndexedAttestation, SignedBeaconBlock, Slot, + IndexedAttestation, SignedBeaconBlock, Slot, Uint256, }; +#[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] +#[serde(deny_unknown_fields)] +pub struct PowBlock { + pub block_hash: Hash256, + pub parent_hash: Hash256, + pub total_difficulty: Uint256, +} + #[derive(Debug, Clone, Copy, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub struct Head { @@ -32,22 +43,25 @@ pub struct Checks { justified_checkpoint_root: Option, finalized_checkpoint: Option, best_justified_checkpoint: Option, + proposer_boost_root: Option, } #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step { +pub enum Step { Tick { tick: u64 }, ValidBlock { block: B }, MaybeValidBlock { block: B, valid: bool }, Attestation { attestation: A }, + PowBlock { pow_block: P }, Checks { checks: Box }, } 
#[derive(Debug, Clone, Deserialize)] #[serde(deny_unknown_fields)] pub struct Meta { - description: String, + #[serde(rename(deserialize = "description"))] + _description: String, } #[derive(Debug)] @@ -55,7 +69,16 @@ pub struct ForkChoiceTest { pub description: String, pub anchor_state: BeaconState, pub anchor_block: BeaconBlock, - pub steps: Vec, Attestation>>, + pub steps: Vec, Attestation, PowBlock>>, +} + +/// Spec for fork choice tests, with proposer boosting enabled. +/// +/// This function can be deleted once `ChainSpec::mainnet` enables proposer boosting by default. +pub fn fork_choice_spec(fork_name: ForkName) -> ChainSpec { + let mut spec = testing_spec::(fork_name); + spec.proposer_score_boost = Some(70); + spec } impl LoadCase for ForkChoiceTest { @@ -67,8 +90,8 @@ impl LoadCase for ForkChoiceTest { .to_str() .expect("path must be valid OsStr") .to_string(); - let spec = &testing_spec::(fork_name); - let steps: Vec> = yaml_decode_file(&path.join("steps.yaml"))?; + let spec = &fork_choice_spec::(fork_name); + let steps: Vec> = yaml_decode_file(&path.join("steps.yaml"))?; // Resolve the object names in `steps.yaml` into actual decoded block/attestation objects. 
let steps = steps .into_iter() @@ -90,6 +113,10 @@ impl LoadCase for ForkChoiceTest { ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))) .map(|attestation| Step::Attestation { attestation }) } + Step::PowBlock { pow_block } => { + ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) + .map(|pow_block| Step::PowBlock { pow_block }) + } Step::Checks { checks } => Ok(Step::Checks { checks }), }) .collect::>()?; @@ -125,14 +152,12 @@ impl Case for ForkChoiceTest { } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { - let tester = Tester::new(self, testing_spec::(fork_name))?; + let tester = Tester::new(self, fork_choice_spec::(fork_name))?; - // The reason for this failure is documented here: - // - // https://github.com/sigp/lighthouse/issues/2741 - // - // We should eventually solve the above issue and remove this `SkippedKnownFailure`. - if self.description == "new_finalized_slot_is_justified_checkpoint_ancestor" { + // TODO(merge): re-enable this test before production. + // This test is skipped until we can do retrospective confirmations of the terminal + // block after an optimistic sync. + if self.description == "block_lookup_failed" { return Err(Error::SkippedKnownFailure); }; @@ -144,6 +169,7 @@ impl Case for ForkChoiceTest { tester.process_block(block.clone(), *valid)? 
} Step::Attestation { attestation } => tester.process_attestation(attestation)?, + Step::PowBlock { pow_block } => tester.process_pow_block(pow_block), Step::Checks { checks } => { let Checks { head, @@ -153,6 +179,7 @@ impl Case for ForkChoiceTest { justified_checkpoint_root, finalized_checkpoint, best_justified_checkpoint, + proposer_boost_root, } = checks.as_ref(); if let Some(expected_head) = head { @@ -184,6 +211,10 @@ impl Case for ForkChoiceTest { tester .check_best_justified_checkpoint(*expected_best_justified_checkpoint)?; } + + if let Some(expected_proposer_boost_root) = proposer_boost_root { + tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?; + } } } } @@ -218,6 +249,8 @@ impl Tester { .spec(spec.clone()) .keypairs(vec![]) .genesis_state_ephemeral_store(case.anchor_state.clone()) + .mock_execution_layer() + .mock_execution_layer_all_payloads_valid() .build(); if harness.chain.genesis_block_root != case.anchor_block.canonical_root() { @@ -228,6 +261,15 @@ impl Tester { )); } + // Drop any blocks that might be loaded in the mock execution layer. Some of these tests + // will provide their own blocks and we want to start from a clean state. + harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .drop_all_blocks(); + assert_eq!( harness.chain.slot_clock.genesis_duration().as_secs(), genesis_time @@ -283,10 +325,11 @@ impl Tester { let block_root = block.canonical_root(); if result.is_ok() != valid { return Err(Error::DidntFail(format!( - "block with root {} was valid={} whilst test expects valid={}", + "block with root {} was valid={} whilst test expects valid={}. 
result: {:?}", block_root, result.is_ok(), - valid + valid, + result ))); } @@ -313,12 +356,21 @@ impl Tester { ) .unwrap(); + let block_delay = self + .harness + .chain + .slot_clock + .seconds_from_current_slot_start(self.spec.seconds_per_slot) + .unwrap(); + let (block, _) = block.deconstruct(); let result = self.harness.chain.fork_choice.write().on_block( self.harness.chain.slot().unwrap(), &block, block_root, + block_delay, &state, + PayloadVerificationStatus::Irrelevant, &self.harness.chain.spec, ); @@ -352,6 +404,21 @@ impl Tester { .map_err(|e| Error::InternalError(format!("attestation import failed with {:?}", e))) } + pub fn process_pow_block(&self, pow_block: &PowBlock) { + let el = self.harness.mock_execution_layer.as_ref().unwrap(); + + // The EF tests don't supply a block number. Our mock execution layer is fine with duplicate + // block numbers for the purposes of this test. + let block_number = 0; + + el.server.insert_pow_block( + block_number, + pow_block.block_hash, + pow_block.parent_hash, + pow_block.total_difficulty, + ); + } + pub fn check_head(&self, expected_head: Head) -> Result<(), Error> { let chain_head = self.find_head().map(|head| Head { slot: head.slot, @@ -439,6 +506,18 @@ impl Tester { expected_checkpoint, ) } + + pub fn check_expected_proposer_boost_root( + &self, + expected_proposer_boost_root: Hash256, + ) -> Result<(), Error> { + let proposer_boost_root = self.harness.chain.fork_choice.read().proposer_boost_root(); + check_equal( + "proposer_boost_root", + proposer_boost_root, + expected_proposer_boost_root, + ) + } } /// Checks that the `head` checkpoint from the beacon chain head matches the `fc` checkpoint gleaned diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index 2a9323c96a..dc139ac0b9 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -4,11 +4,12 @@ use 
crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; -use types::{BeaconState, Deposit, EthSpec, ForkName, Hash256}; +use types::{BeaconState, Deposit, EthSpec, ExecutionPayloadHeader, ForkName, Hash256}; #[derive(Debug, Clone, Deserialize)] struct Metadata { deposits_count: usize, + execution_payload_header: Option, } #[derive(Debug, Clone, Deserialize)] @@ -24,6 +25,7 @@ pub struct GenesisInitialization { pub eth1_block_hash: Hash256, pub eth1_timestamp: u64, pub deposits: Vec, + pub execution_payload_header: Option>, pub state: Option>, } @@ -34,6 +36,14 @@ impl LoadCase for GenesisInitialization { eth1_timestamp, } = yaml_decode_file(&path.join("eth1.yaml"))?; let meta: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; + let execution_payload_header: Option> = + if meta.execution_payload_header.unwrap_or(false) { + Some(ssz_decode_file( + &path.join("execution_payload_header.ssz_snappy"), + )?) 
+ } else { + None + }; let deposits: Vec = (0..meta.deposits_count) .map(|i| { let filename = format!("deposits_{}.ssz_snappy", i); @@ -48,6 +58,7 @@ impl LoadCase for GenesisInitialization { eth1_block_hash, eth1_timestamp, deposits, + execution_payload_header, state: Some(state), }) } @@ -66,6 +77,7 @@ impl Case for GenesisInitialization { self.eth1_block_hash, self.eth1_timestamp, self.deposits.clone(), + self.execution_payload_header.clone(), spec, ); diff --git a/testing/ef_tests/src/cases/genesis_validity.rs b/testing/ef_tests/src/cases/genesis_validity.rs index e645d69adc..abdc1ed4a7 100644 --- a/testing/ef_tests/src/cases/genesis_validity.rs +++ b/testing/ef_tests/src/cases/genesis_validity.rs @@ -7,7 +7,8 @@ use types::{BeaconState, EthSpec, ForkName}; #[derive(Debug, Clone, Deserialize)] pub struct Metadata { - description: String, + #[serde(rename(deserialize = "description"))] + _description: String, } #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 293195662d..195df7f382 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -7,18 +7,18 @@ use crate::type_name::TypeName; use serde_derive::Deserialize; use state_processing::per_block_processing::{ errors::BlockProcessingError, - process_block_header, + process_block_header, process_execution_payload, process_operations::{ altair, base, process_attester_slashings, process_deposits, process_exits, process_proposer_slashings, }, - process_sync_aggregate, VerifySignatures, + process_sync_aggregate, VerifyBlockRoot, VerifySignatures, }; use std::fmt::Debug; use std::path::Path; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, ForkName, - ProposerSlashing, SignedVoluntaryExit, SyncAggregate, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, + ExecutionPayload, ForkName, 
ProposerSlashing, SignedVoluntaryExit, SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -27,9 +27,15 @@ struct Metadata { bls_setting: Option, } +#[derive(Debug, Clone, Deserialize)] +struct ExecutionMetadata { + execution_valid: bool, +} + #[derive(Debug, Clone)] pub struct Operations> { metadata: Metadata, + execution_metadata: Option, pub pre: BeaconState, pub operation: Option, pub post: Option>, @@ -54,6 +60,7 @@ pub trait Operation: TypeName + Debug + Sync + Sized { &self, state: &mut BeaconState, spec: &ChainSpec, + _: &Operations, ) -> Result<(), BlockProcessingError>; } @@ -66,13 +73,14 @@ impl Operation for Attestation { &self, state: &mut BeaconState, spec: &ChainSpec, + _: &Operations, ) -> Result<(), BlockProcessingError> { let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64; match state { BeaconState::Base(_) => { base::process_attestations(state, &[self.clone()], VerifySignatures::True, spec) } - BeaconState::Altair(_) => altair::process_attestation( + BeaconState::Altair(_) | BeaconState::Merge(_) => altair::process_attestation( state, self, 0, @@ -97,6 +105,7 @@ impl Operation for AttesterSlashing { &self, state: &mut BeaconState, spec: &ChainSpec, + _: &Operations, ) -> Result<(), BlockProcessingError> { process_attester_slashings(state, &[self.clone()], VerifySignatures::True, spec) } @@ -111,6 +120,7 @@ impl Operation for Deposit { &self, state: &mut BeaconState, spec: &ChainSpec, + _: &Operations, ) -> Result<(), BlockProcessingError> { process_deposits(state, &[self.clone()], spec) } @@ -129,6 +139,7 @@ impl Operation for ProposerSlashing { &self, state: &mut BeaconState, spec: &ChainSpec, + _: &Operations, ) -> Result<(), BlockProcessingError> { process_proposer_slashings(state, &[self.clone()], VerifySignatures::True, spec) } @@ -147,6 +158,7 @@ impl Operation for SignedVoluntaryExit { &self, state: &mut BeaconState, spec: &ChainSpec, + _: &Operations, ) -> Result<(), BlockProcessingError> { 
process_exits(state, &[self.clone()], VerifySignatures::True, spec) } @@ -169,8 +181,9 @@ impl Operation for BeaconBlock { &self, state: &mut BeaconState, spec: &ChainSpec, + _: &Operations, ) -> Result<(), BlockProcessingError> { - process_block_header(state, self.to_ref(), spec)?; + process_block_header(state, self.to_ref(), VerifyBlockRoot::True, spec)?; Ok(()) } } @@ -196,12 +209,48 @@ impl Operation for SyncAggregate { &self, state: &mut BeaconState, spec: &ChainSpec, + _: &Operations, ) -> Result<(), BlockProcessingError> { let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64; process_sync_aggregate(state, self, proposer_index, VerifySignatures::True, spec) } } +impl Operation for ExecutionPayload { + fn handler_name() -> String { + "execution_payload".into() + } + + fn filename() -> String { + "execution_payload.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair + } + + fn decode(path: &Path, _spec: &ChainSpec) -> Result { + ssz_decode_file(path) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + extra: &Operations, + ) -> Result<(), BlockProcessingError> { + let valid = extra + .execution_metadata + .as_ref() + .map_or(false, |e| e.execution_valid); + if valid { + process_execution_payload(state, self, spec) + } else { + Err(BlockProcessingError::ExecutionInvalid) + } + } +} + impl> LoadCase for Operations { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); @@ -212,6 +261,14 @@ impl> LoadCase for Operations { Metadata::default() }; + // For execution payloads only. + let execution_yaml_path = path.join("execution.yaml"); + let execution_metadata = if execution_yaml_path.is_file() { + Some(yaml_decode_file(&execution_yaml_path)?) 
+ } else { + None + }; + let pre = ssz_decode_state(&path.join("pre.ssz_snappy"), spec)?; // Check BLS setting here before SSZ deserialization, as most types require signatures @@ -237,6 +294,7 @@ impl> LoadCase for Operations { Ok(Self { metadata, + execution_metadata, pre, operation, post, @@ -270,7 +328,7 @@ impl> Case for Operations { .operation .as_ref() .ok_or(Error::SkippedBls)? - .apply_to(&mut state, spec) + .apply_to(&mut state, spec, self) .map(|()| state); compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index c9f48c936e..8aa041bce1 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -3,6 +3,7 @@ use crate::case_result::compare_result_detailed; use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use compare_fields_derive::CompareFields; use serde_derive::Deserialize; +use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; use state_processing::{ per_epoch_processing::{ @@ -26,11 +27,16 @@ pub struct Deltas { penalties: Vec, } -#[derive(Debug, Clone, PartialEq, CompareFields)] +// Define "legacy" implementations of `Option`, `Option` which use four bytes +// for encoding the union selector. 
+four_byte_option_impl!(four_byte_option_deltas, Deltas); + +#[derive(Debug, Clone, PartialEq, Decode, Encode, CompareFields)] pub struct AllDeltas { source_deltas: Deltas, target_deltas: Deltas, head_deltas: Deltas, + #[ssz(with = "four_byte_option_deltas")] inclusion_delay_deltas: Option, inactivity_penalty_deltas: Deltas, } diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index cb5708b12e..c155be877a 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -5,6 +5,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, + VerifyBlockRoot, }; use types::{BeaconState, EthSpec, ForkName, RelativeEpoch, SignedBeaconBlock}; @@ -98,6 +99,7 @@ impl Case for SanityBlocks { signed_block, None, BlockSignatureStrategy::VerifyIndividual, + VerifyBlockRoot::True, spec, )?; @@ -106,6 +108,7 @@ impl Case for SanityBlocks { signed_block, None, BlockSignatureStrategy::VerifyBulk, + VerifyBlockRoot::True, spec, )?; diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 022da9223d..2374ead888 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -15,7 +15,8 @@ use types::{BitList, BitVector, FixedVector, ForkName, VariableList}; #[derive(Debug, Clone, Deserialize)] struct Metadata { root: String, - signing_root: Option, + #[serde(rename(deserialize = "signing_root"))] + _signing_root: Option, } #[derive(Debug, Clone)] diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs index 732a7d851f..d0cc5f9eac 100644 --- a/testing/ef_tests/src/cases/ssz_static.rs +++ b/testing/ef_tests/src/cases/ssz_static.rs @@ -10,7 +10,8 @@ use types::{BeaconBlock, BeaconState, ForkName, 
Hash256, SignedBeaconBlock}; #[derive(Debug, Clone, Deserialize)] struct SszStaticRoots { root: String, - signing_root: Option, + #[serde(rename(deserialize = "signing_root"))] + _signing_root: Option, } /// Runner for types that implement `ssz::Decode`. diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index d41a52d52f..d2b1bb2c62 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -4,6 +4,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, + VerifyBlockRoot, }; use std::str::FromStr; use types::{BeaconState, Epoch, ForkName, SignedBeaconBlock}; @@ -37,6 +38,10 @@ impl LoadCase for TransitionTest { ForkName::Altair => { spec.altair_fork_epoch = Some(metadata.fork_epoch); } + ForkName::Merge => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks @@ -91,6 +96,7 @@ impl Case for TransitionTest { block, None, BlockSignatureStrategy::VerifyBulk, + VerifyBlockRoot::True, spec, ) .map_err(|e| format!("Block processing failed: {:?}", e))?; diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 11bda8f9f3..636119cdba 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -2,7 +2,7 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; use crate::type_name; use crate::type_name::TypeName; use derivative::Derivative; -use std::fs; +use std::fs::{self, DirEntry}; use std::marker::PhantomData; use std::path::PathBuf; use types::{BeaconState, EthSpec, ForkName}; @@ -31,29 +31,27 @@ pub trait Handler { } fn run_for_fork(&self, fork_name: ForkName) { - let fork_name_str = match fork_name { - ForkName::Base => "phase0", - ForkName::Altair => "altair", - }; 
+ let fork_name_str = fork_name.to_string(); let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("consensus-spec-tests") .join("tests") .join(Self::config_name()) - .join(fork_name_str) + .join(&fork_name_str) .join(Self::runner_name()) .join(self.handler_name()); // Iterate through test suites + let as_directory = |entry: Result| -> Option { + entry + .ok() + .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) + }; let test_cases = fs::read_dir(&handler_path) .expect("handler dir exists") - .flat_map(|entry| { - entry - .ok() - .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) - }) + .filter_map(as_directory) .flat_map(|suite| fs::read_dir(suite.path()).expect("suite dir exists")) - .flat_map(Result::ok) + .filter_map(as_directory) .map(|test_case_dir| { let path = test_case_dir.path(); let case = Self::Case::load_from_dir(&path, fork_name).expect("test should load"); @@ -144,6 +142,18 @@ impl SszStaticHandler { pub fn altair_only() -> Self { Self::for_forks(vec![ForkName::Altair]) } + + pub fn altair_and_later() -> Self { + Self::for_forks(ForkName::list_all()[1..].to_vec()) + } + + pub fn merge_only() -> Self { + Self::for_forks(vec![ForkName::Merge]) + } + + pub fn merge_and_later() -> Self { + Self::for_forks(ForkName::list_all()[2..].to_vec()) + } } /// Handler for SSZ types that implement `CachedTreeHash`. 
@@ -297,6 +307,11 @@ pub struct RandomHandler(PhantomData); impl Handler for RandomHandler { type Case = cases::SanityBlocks; + // FIXME(merge): enable merge tests once available + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name != ForkName::Merge + } + fn config_name() -> &'static str { E::name() } @@ -421,37 +436,21 @@ impl Handler for FinalityHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] -pub struct ForkChoiceGetHeadHandler(PhantomData); +pub struct ForkChoiceHandler { + handler_name: String, + _phantom: PhantomData, +} -impl Handler for ForkChoiceGetHeadHandler { - type Case = cases::ForkChoiceTest; - - fn config_name() -> &'static str { - E::name() - } - - fn runner_name() -> &'static str { - "fork_choice" - } - - fn handler_name(&self) -> String { - "get_head".into() - } - - fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool { - // These tests check block validity (which may include signatures) and there is no need to - // run them with fake crypto. - cfg!(not(feature = "fake_crypto")) +impl ForkChoiceHandler { + pub fn new(handler_name: &str) -> Self { + Self { + handler_name: handler_name.into(), + _phantom: PhantomData, + } } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] -pub struct ForkChoiceOnBlockHandler(PhantomData); - -impl Handler for ForkChoiceOnBlockHandler { +impl Handler for ForkChoiceHandler { type Case = cases::ForkChoiceTest; fn config_name() -> &'static str { @@ -463,10 +462,17 @@ impl Handler for ForkChoiceOnBlockHandler { } fn handler_name(&self) -> String { - "on_block".into() + self.handler_name.clone() } - fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool { + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Merge block tests are only enabled for Bellatrix or later. 
+ if self.handler_name == "on_merge_block" + && (fork_name == ForkName::Base || fork_name == ForkName::Altair) + { + return false; + } + // These tests check block validity (which may include signatures) and there is no need to // run them with fake crypto. cfg!(not(feature = "fake_crypto")) diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 6576a2fb26..4d068cb91f 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -41,21 +41,20 @@ type_name_generic!(Attestation); type_name!(AttestationData); type_name_generic!(AttesterSlashing); type_name_generic!(BeaconBlock); -type_name_generic!(BeaconBlockBase, "BeaconBlock"); -type_name_generic!(BeaconBlockAltair, "BeaconBlock"); type_name_generic!(BeaconBlockBody); type_name_generic!(BeaconBlockBodyBase, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyAltair, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyMerge, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); -type_name_generic!(BeaconStateBase, "BeaconState"); -type_name_generic!(BeaconStateAltair, "BeaconState"); type_name!(Checkpoint); type_name_generic!(ContributionAndProof); type_name!(Deposit); type_name!(DepositData); type_name!(DepositMessage); type_name!(Eth1Data); +type_name_generic!(ExecutionPayload); +type_name_generic!(ExecutionPayloadHeader); type_name!(Fork); type_name!(ForkData); type_name_generic!(HistoricalBatch); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 25a4618558..bdefec0014 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -70,6 +70,12 @@ fn operations_sync_aggregate() { OperationsHandler::>::default().run(); } +#[test] +fn operations_execution_payload() { + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); +} + #[test] fn sanity_blocks() { SanityBlocksHandler::::default().run(); @@ -228,55 +234,74 @@ mod ssz_static { 
.run(); SszStaticHandler::, MainnetEthSpec>::altair_only() .run(); + SszStaticHandler::, MinimalEthSpec>::merge_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::merge_only() + .run(); } - // Altair-only + // Altair and later #[test] fn contribution_and_proof() { - SszStaticHandler::, MinimalEthSpec>::altair_only() - .run(); - SszStaticHandler::, MainnetEthSpec>::altair_only() - .run(); + SszStaticHandler::, MinimalEthSpec>::altair_and_later( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::altair_and_later( + ) + .run(); } #[test] fn signed_contribution_and_proof() { - SszStaticHandler::, MinimalEthSpec>::altair_only().run(); - SszStaticHandler::, MainnetEthSpec>::altair_only().run(); + SszStaticHandler::, MinimalEthSpec>::altair_and_later().run(); + SszStaticHandler::, MainnetEthSpec>::altair_and_later().run(); } #[test] fn sync_aggregate() { - SszStaticHandler::, MinimalEthSpec>::altair_only().run(); - SszStaticHandler::, MainnetEthSpec>::altair_only().run(); + SszStaticHandler::, MinimalEthSpec>::altair_and_later().run(); + SszStaticHandler::, MainnetEthSpec>::altair_and_later().run(); } #[test] fn sync_committee() { - SszStaticHandler::, MinimalEthSpec>::altair_only().run(); - SszStaticHandler::, MainnetEthSpec>::altair_only().run(); + SszStaticHandler::, MinimalEthSpec>::altair_and_later().run(); + SszStaticHandler::, MainnetEthSpec>::altair_and_later().run(); } #[test] fn sync_committee_contribution() { - SszStaticHandler::, MinimalEthSpec>::altair_only( - ) - .run(); - SszStaticHandler::, MainnetEthSpec>::altair_only( - ) - .run(); + SszStaticHandler::, MinimalEthSpec>::altair_and_later().run(); + SszStaticHandler::, MainnetEthSpec>::altair_and_later().run(); } #[test] fn sync_committee_message() { - SszStaticHandler::::altair_only().run(); - SszStaticHandler::::altair_only().run(); + SszStaticHandler::::altair_and_later().run(); + SszStaticHandler::::altair_and_later().run(); } #[test] fn sync_aggregator_selection_data() { - 
SszStaticHandler::::altair_only().run(); - SszStaticHandler::::altair_only().run(); + SszStaticHandler::::altair_and_later().run(); + SszStaticHandler::::altair_and_later().run(); + } + + // Merge and later + #[test] + fn execution_payload() { + SszStaticHandler::, MinimalEthSpec>::merge_and_later() + .run(); + SszStaticHandler::, MainnetEthSpec>::merge_and_later() + .run(); + } + + #[test] + fn execution_payload_header() { + SszStaticHandler::, MinimalEthSpec>::merge_and_later() + .run(); + SszStaticHandler::, MainnetEthSpec>::merge_and_later() + .run(); } } @@ -388,14 +413,26 @@ fn finality() { #[test] fn fork_choice_get_head() { - ForkChoiceGetHeadHandler::::default().run(); - ForkChoiceGetHeadHandler::::default().run(); + ForkChoiceHandler::::new("get_head").run(); + ForkChoiceHandler::::new("get_head").run(); } #[test] fn fork_choice_on_block() { - ForkChoiceOnBlockHandler::::default().run(); - ForkChoiceOnBlockHandler::::default().run(); + ForkChoiceHandler::::new("on_block").run(); + ForkChoiceHandler::::new("on_block").run(); +} + +#[test] +fn fork_choice_on_merge_block() { + ForkChoiceHandler::::new("on_merge_block").run(); + ForkChoiceHandler::::new("on_merge_block").run(); +} + +#[test] +fn fork_choice_ex_ante() { + ForkChoiceHandler::::new("ex_ante").run(); + ForkChoiceHandler::::new("ex_ante").run(); } #[test] diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 42bf61384d..7ff387b9c6 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -193,7 +193,7 @@ pub async fn verify_full_sync_aggregates_up_to( .map(|agg| agg.num_set_bits()) }) .map_err(|e| format!("Error while getting beacon block: {:?}", e))? 
- .ok_or(format!("Altair block {} should have sync aggregate", slot))?; + .map_err(|_| format!("Altair block {} should have sync aggregate", slot))?; if sync_aggregate_count != E::sync_committee_size() { return Err(format!( diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 50727f4266..80fc755d52 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -6,8 +6,8 @@ use eth1::{DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; use eth1_test_rig::GanacheEth1Instance; use futures::prelude::*; use node_test_rig::{ - environment::EnvironmentBuilder, testing_client_config, testing_validator_config, - ClientGenesis, ValidatorFiles, + environment::{EnvironmentBuilder, LoggerConfig}, + testing_client_config, testing_validator_config, ClientGenesis, ValidatorFiles, }; use rayon::prelude::*; use sensitive_url::SensitiveUrl; @@ -53,7 +53,15 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let log_format = None; let mut env = EnvironmentBuilder::minimal() - .async_logger(log_level, log_format)? + .initialize_logger(LoggerConfig { + path: None, + debug_level: log_level, + logfile_debug_level: "debug", + log_format, + max_log_size: 0, + max_log_number: 0, + compression: false, + })? .multi_threaded_tokio_runtime()? 
.build()?; diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 2eda987d49..5d2f0be72f 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -2,8 +2,8 @@ use crate::{checks, LocalNetwork}; use clap::ArgMatches; use futures::prelude::*; use node_test_rig::{ - environment::EnvironmentBuilder, testing_client_config, testing_validator_config, - ClientGenesis, ValidatorFiles, + environment::{EnvironmentBuilder, LoggerConfig}, + testing_client_config, testing_validator_config, ClientGenesis, ValidatorFiles, }; use rayon::prelude::*; use std::cmp::max; @@ -45,7 +45,15 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let log_format = None; let mut env = EnvironmentBuilder::mainnet() - .async_logger(log_level, log_format)? + .initialize_logger(LoggerConfig { + path: None, + debug_level: log_level, + logfile_debug_level: "debug", + log_format, + max_log_size: 0, + max_log_number: 0, + compression: false, + })? .multi_threaded_tokio_runtime()? .build()?; diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 9da52a35c9..e328938db1 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -3,7 +3,8 @@ use crate::local_network::LocalNetwork; use clap::ArgMatches; use futures::prelude::*; use node_test_rig::{ - environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorFiles, + environment::{EnvironmentBuilder, LoggerConfig}, + testing_client_config, ClientGenesis, ValidatorFiles, }; use node_test_rig::{testing_validator_config, ClientConfig}; use std::cmp::max; @@ -45,7 +46,15 @@ fn syncing_sim( log_format: Option<&str>, ) -> Result<(), String> { let mut env = EnvironmentBuilder::minimal() - .async_logger(log_level, log_format)? 
+ .initialize_logger(LoggerConfig { + path: None, + debug_level: log_level, + logfile_debug_level: "debug", + log_format, + max_log_size: 0, + max_log_number: 0, + compression: false, + })? .multi_threaded_tokio_runtime()? .build()?; diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 7dc17f64c4..1192f79909 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -9,6 +9,6 @@ edition = "2018" [dependencies] state_processing = { path = "../../consensus/state_processing" } types = { path = "../../consensus/types" } -eth2_ssz = "0.4.0" +eth2_ssz = "0.4.1" beacon_chain = { path = "../../beacon_node/beacon_chain" } lazy_static = "1.4.0" diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index a52ccf420d..75f82b3132 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -2,7 +2,7 @@ use super::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use state_processing::{ per_block_processing, per_block_processing::errors::ExitInvalid, BlockProcessingError, - BlockSignatureStrategy, + BlockSignatureStrategy, VerifyBlockRoot, }; use types::{BeaconBlock, BeaconState, Epoch, EthSpec, SignedBeaconBlock}; @@ -66,6 +66,7 @@ impl ExitTest { block, None, BlockSignatureStrategy::VerifyIndividual, + VerifyBlockRoot::True, &E::default_spec(), ) } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index d48443b5cd..4e8aa57a5b 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -12,7 +12,7 @@ path = "src/lib.rs" tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } [dependencies] -tree_hash = "0.4.0" +tree_hash = "0.4.1" clap = "2.33.3" slashing_protection = { path = "./slashing_protection" } slot_clock = { path = "../common/slot_clock" } @@ -45,7 +45,7 @@ lighthouse_version 
= { path = "../common/lighthouse_version" } warp_utils = { path = "../common/warp_utils" } warp = "0.3.2" hyper = "0.14.4" -eth2_serde_utils = "0.1.0" +eth2_serde_utils = "0.1.1" libsecp256k1 = "0.6.0" ring = "0.16.19" rand = "0.7.3" diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 7f30170de9..634e49feea 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -13,9 +13,13 @@ r2d2_sqlite = "0.18.0" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.58" -eth2_serde_utils = "0.1.0" +eth2_serde_utils = "0.1.1" filesystem = { path = "../../common/filesystem" } +arbitrary = { version = "1.0", features = ["derive"], optional = true } [dev-dependencies] lazy_static = "1.4.0" rayon = "1.4.1" + +[features] +arbitrary-fuzz = ["arbitrary", "types/arbitrary-fuzz"] diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile index 5787590260..e3d935b4c9 100644 --- a/validator_client/slashing_protection/Makefile +++ b/validator_client/slashing_protection/Makefile @@ -1,8 +1,8 @@ -TESTS_TAG := v5.2.0 +TESTS_TAG := v5.2.1 GENERATE_DIR := generated-tests OUTPUT_DIR := interchange-tests TARBALL := $(OUTPUT_DIR)-$(TESTS_TAG).tar.gz -ARCHIVE_URL := https://github.com/eth2-clients/slashing-protection-interchange-tests/tarball/$(TESTS_TAG) +ARCHIVE_URL := https://github.com/eth-clients/slashing-protection-interchange-tests/tarball/$(TESTS_TAG) ifeq ($(OS),Windows_NT) ifeq (, $(shell where rm)) diff --git a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs index 2bca9727af..b96dd8eb79 100644 --- a/validator_client/slashing_protection/src/bin/test_generator.rs +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -224,6 +224,19 @@ fn main() { .with_blocks(vec![(0, 20, false)]), ], ), + MultiTestCase::new( + 
"multiple_interchanges_single_validator_multiple_blocks_out_of_order", + vec![ + TestCase::new(interchange(vec![(0, vec![0], vec![])])).with_blocks(vec![ + (0, 10, true), + (0, 20, true), + (0, 30, true), + ]), + TestCase::new(interchange(vec![(0, vec![20], vec![])])) + .contains_slashable_data() + .with_blocks(vec![(0, 29, false)]), + ], + ), MultiTestCase::new( "multiple_interchanges_single_validator_fail_iff_imported", vec![ diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs index a9185e5bb2..3793766b6a 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -7,6 +7,7 @@ use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeMetadata { #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] pub interchange_format_version: u64, @@ -15,6 +16,7 @@ pub struct InterchangeMetadata { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeData { pub pubkey: PublicKeyBytes, pub signed_blocks: Vec, @@ -23,6 +25,7 @@ pub struct InterchangeData { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedBlock { #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] pub slot: Slot, @@ -32,6 +35,7 @@ pub struct SignedBlock { #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedAttestation { #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] pub 
source_epoch: Epoch, @@ -42,6 +46,7 @@ pub struct SignedAttestation { } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct Interchange { pub metadata: InterchangeMetadata, pub data: Vec, diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index 6bd6ce38b3..dc828773b9 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -9,6 +9,7 @@ use tempfile::tempdir; use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct MultiTestCase { pub name: String, pub genesis_validators_root: Hash256, @@ -16,6 +17,7 @@ pub struct MultiTestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestCase { pub should_succeed: bool, pub contains_slashable_data: bool, @@ -25,6 +27,7 @@ pub struct TestCase { } #[derive(Debug, Clone, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestBlock { pub pubkey: PublicKeyBytes, pub slot: Slot, @@ -33,6 +36,7 @@ pub struct TestBlock { } #[derive(Debug, Clone, Deserialize, Serialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct TestAttestation { pub pubkey: PublicKeyBytes, pub source_epoch: Epoch, @@ -230,7 +234,7 @@ impl TestCase { } } -fn check_minification_invariants(interchange: &Interchange, minified: &Interchange) { +pub fn check_minification_invariants(interchange: &Interchange, minified: &Interchange) { // Metadata should be unchanged. 
assert_eq!(interchange.metadata, minified.metadata); diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 725aa6057d..2b187f46ef 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -648,29 +648,17 @@ impl SlashingDatabase { // Summary of minimum and maximum messages pre-import. let prev_summary = self.validator_summary(pubkey, txn)?; - // If the interchange contains a new maximum slot block, import it. + // If the interchange contains any blocks, update the database with the new max slot. let max_block = record.signed_blocks.iter().max_by_key(|b| b.slot); if let Some(max_block) = max_block { - // Block is relevant if there are no previous blocks, or new block has slot greater than - // previous maximum. - if prev_summary - .max_block_slot - .map_or(true, |max_block_slot| max_block.slot > max_block_slot) - { - self.insert_block_proposal( - txn, - pubkey, - max_block.slot, - max_block - .signing_root - .map(SigningRoot::from) - .unwrap_or_default(), - )?; + // Store new synthetic block with maximum slot and null signing root. Remove all other + // blocks. + let new_max_slot = max_or(prev_summary.max_block_slot, max_block.slot); + let signing_root = SigningRoot::default(); - // Prune the database so that it contains *only* the new block. - self.prune_signed_blocks(&record.pubkey, max_block.slot, txn)?; - } + self.clear_signed_blocks(pubkey, txn)?; + self.insert_block_proposal(txn, pubkey, new_max_slot, signing_root)?; } // Find the attestations with max source and max target. Unless the input contains slashable @@ -901,6 +889,23 @@ impl SlashingDatabase { Ok(()) } + /// Remove all blocks signed by a given `public_key`. + /// + /// Dangerous, should only be used immediately before inserting a new block in the same + /// transacation. 
+ fn clear_signed_blocks( + &self, + public_key: &PublicKeyBytes, + txn: &Transaction, + ) -> Result<(), NotSafe> { + let validator_id = self.get_validator_id_in_txn(txn, public_key)?; + txn.execute( + "DELETE FROM signed_blocks WHERE validator_id = ?1", + params![validator_id], + )?; + Ok(()) + } + /// Prune the signed attestations table for the given validator keys. pub fn prune_all_signed_attestations<'a>( &self, diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 4b07c72b8a..0695012fb3 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -131,7 +131,7 @@ impl Config { if let Some(beacon_nodes) = parse_optional::(cli_args, "beacon-nodes")? { config.beacon_nodes = beacon_nodes .split(',') - .map(|s| SensitiveUrl::parse(s)) + .map(SensitiveUrl::parse) .collect::>() .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 57585e2672..72e651f7d1 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -282,10 +282,7 @@ pub fn load_pem_certificate>(pem_path: P) -> Result Result { - Url::parse(base_url)?.join(&format!( - "api/v1/eth2/sign/{}", - voting_public_key.to_string() - )) + Url::parse(base_url)?.join(&format!("api/v1/eth2/sign/{}", voting_public_key)) } /// Try to unlock `keystore` at `keystore_path` by prompting the user via `stdin`. 
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index a3ab10316a..a721496fcd 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -84,7 +84,6 @@ pub struct ProductionValidatorClient { doppelganger_service: Option>, validator_store: Arc>, http_api_listen_addr: Option, - http_metrics_ctx: Option>>, config: Config, } @@ -431,7 +430,6 @@ impl ProductionValidatorClient { validator_store, config, http_api_listen_addr: None, - http_metrics_ctx, }) } diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 561cda1610..7f28700a20 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -28,6 +28,7 @@ pub enum Error { Web3SignerJsonParsingFailed(String), ShuttingDown, TokioJoin(String), + MergeForkNotSupported, } /// Enumerates all messages that can be signed by a validator. @@ -158,7 +159,7 @@ impl SigningMethod { SignableMessage::RandaoReveal(epoch) => { Web3SignerObject::RandaoReveal { epoch } } - SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block), + SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?, SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a), SignableMessage::SignedAggregateAndProof(a) => { Web3SignerObject::AggregateAndProof(a) diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 6ffe2a1ee0..b632986c94 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -1,5 +1,6 @@ //! Contains the types required to make JSON requests to Web3Signer servers. 
+use super::Error; use serde::{Deserialize, Serialize}; use types::*; @@ -66,13 +67,14 @@ pub enum Web3SignerObject<'a, T: EthSpec> { } impl<'a, T: EthSpec> Web3SignerObject<'a, T> { - pub fn beacon_block(block: &'a BeaconBlock) -> Self { + pub fn beacon_block(block: &'a BeaconBlock) -> Result { let version = match block { BeaconBlock::Base(_) => ForkName::Phase0, BeaconBlock::Altair(_) => ForkName::Altair, + BeaconBlock::Merge(_) => return Err(Error::MergeForkNotSupported), }; - Web3SignerObject::BeaconBlock { version, block } + Ok(Web3SignerObject::BeaconBlock { version, block }) } pub fn message_type(&self) -> MessageType {