diff --git a/.dockerignore b/.dockerignore index bafdf59616..738cc4a278 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,5 @@ testing/ef_tests/consensus-spec-tests +testing/execution_engine_integration/execution_clients target/ *.data *.tar.gz diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 41fcfce38f..598754368e 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -7,7 +7,7 @@ on: jobs: build-and-upload-to-s3: - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@master diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 57ccbdaa14..da0bcb3857 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -12,7 +12,7 @@ env: # Deny warnings in CI RUSTFLAGS: "-D warnings" # The Nightly version used for cargo-udeps, might need updating from time to time. - PINNED_NIGHTLY: nightly-2021-12-01 + PINNED_NIGHTLY: nightly-2022-05-20 jobs: target-branch-check: name: target-branch-check diff --git a/Cargo.lock b/Cargo.lock index c7e79866cf..ed4915bf35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -36,7 +36,7 @@ dependencies = [ "eth2_keystore", "eth2_wallet", "filesystem", - "rand 0.7.3", + "rand 0.8.5", "regex", "rpassword", "serde", @@ -86,7 +86,7 @@ checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", "cipher", - "cpufeatures 0.2.1", + "cpufeatures 0.2.2", "ctr", "opaque-debug", ] @@ -175,12 +175,6 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - [[package]] name = "arrayvec" version = "0.7.2" @@ -279,10 +273,10 @@ dependencies = [ ] [[package]] -name = "base64" -version = 
"0.12.3" +name = "base16ct" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" [[package]] name = "base64" @@ -290,24 +284,32 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "base64ct" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" + [[package]] name = "beacon_chain" version = "0.2.0" dependencies = [ - "bitvec 0.19.6", + "bitvec 0.20.4", "bls", "derivative", "environment", "eth1", "eth2", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "eth2_ssz_types", "execution_layer", + "exit-future", "fork_choice", "futures", "genesis", + "hex", "int_to_bytes", "itertools", "lazy_static", @@ -317,9 +319,9 @@ dependencies = [ "maplit", "merkle_proof", "operation_pool", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "proto_array", - "rand 0.7.3", + "rand 0.8.5", "rayon", "safe_arith", "sensitive_url", @@ -333,7 +335,7 @@ dependencies = [ "smallvec", "state_processing", "store", - "strum 0.21.0", + "strum", "superstruct", "task_executor", "tempfile", @@ -344,7 +346,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "2.1.5" +version = "2.2.1" dependencies = [ "beacon_chain", "clap", @@ -409,38 +411,28 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -[[package]] -name = "bitvec" -version = "0.17.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium 0.3.0", -] - -[[package]] -name = "bitvec" -version = "0.19.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55f93d0ef3363c364d5976646a38f04cf67cfe1d4c8d160cdea02cab2c116b33" -dependencies = [ - "funty", - "radium 0.5.3", - "tap", - "wyz", -] - [[package]] name = "bitvec" version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" dependencies = [ - "funty", + "funty 1.1.0", "radium 0.6.2", "tap", - "wyz", + "wyz 0.2.0", +] + +[[package]] +name = "bitvec" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" +dependencies = [ + "funty 2.0.0", + "radium 0.7.0", + "tap", + "wyz 0.5.0", ] [[package]] @@ -483,7 +475,7 @@ version = "0.2.0" dependencies = [ "arbitrary", "blst", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "eth2_serde_utils", "eth2_ssz", "ethereum-types 0.12.1", @@ -510,7 +502,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.1.5" +version = "2.2.1" dependencies = [ "beacon_node", "clap", @@ -568,12 +560,6 @@ version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" -[[package]] -name = "byte-slice-cast" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" - [[package]] name = "byte-slice-cast" version = "1.2.1" @@ -591,6 +577,9 @@ name = "bytes" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +dependencies = [ + "serde", +] [[package]] name = "bzip2" @@ -617,9 +606,9 @@ dependencies = [ name = "cached_tree_hash" version = "0.1.0" dependencies = [ - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "eth2_ssz_types", "ethereum-types 0.12.1", "quickcheck 0.9.2", @@ -669,7 +658,7 @@ checksum = "01b72a433d0cf2aef113ba70f62634c56fddb0f244e6377185c56a7cadbd8f91" dependencies = [ "cfg-if", "cipher", - "cpufeatures 0.2.1", + "cpufeatures 0.2.2", "zeroize", ] @@ -771,7 +760,7 @@ dependencies = [ "lighthouse_network", "monitoring_api", "network", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "sensitive_url", "serde", "serde_derive", @@ -828,6 +817,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279bc8fc53f788a75c7804af68237d1fce02cde1e275a886a4b320604dc2aeda" +[[package]] +name = "const-oid" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" + [[package]] name = "convert_case" version = "0.4.0" @@ -870,9 +865,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" dependencies = [ "libc", ] @@ -972,6 +967,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +dependencies = [ + "generic-array", + "rand_core 0.6.3", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.3" @@ -1039,7 +1046,7 @@ version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a19c6cedffdc8c03a3346d723eb20bd85a13362bb96dc2ac000842c6381ec7bf" dependencies = [ - "nix 0.23.1", + "nix", "winapi", ] @@ -1106,8 +1113,9 @@ dependencies = [ [[package]] name = "darwin-libproc" -version = "0.2.0" -source = "git+https://github.com/agemanning/darwin-libproc?rev=73d1587cb363c00737652fdc987f1bcbaf153ef7#73d1587cb363c00737652fdc987f1bcbaf153ef7" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb90051930c9a0f09e585762152048e23ac74d20c10590ef7cf01c0343c3046" dependencies = [ "darwin-libproc-sys", "libc", @@ -1116,8 +1124,12 @@ dependencies = [ [[package]] name = "darwin-libproc-sys" -version = "0.2.0" -source = "git+https://github.com/agemanning/darwin-libproc?rev=73d1587cb363c00737652fdc987f1bcbaf153ef7#73d1587cb363c00737652fdc987f1bcbaf153ef7" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57cebb5bde66eecdd30ddc4b9cd208238b15db4982ccc72db59d699ea10867c1" +dependencies = [ + "libc", +] [[package]] name = "data-encoding" @@ -1138,7 +1150,7 @@ dependencies = [ "slog", "sloggers", "store", - "strum 0.24.0", + "strum", "tempfile", "types", ] @@ -1154,7 +1166,7 @@ name = "deposit_contract" version = "0.2.0" dependencies = [ "eth2_ssz", - "ethabi 12.0.0", + "ethabi 16.0.0", "hex", "reqwest", "serde_json", @@ -1169,10 +1181,19 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eeb9d92785d1facb50567852ce75d0858630630e7eabea59cf7eb7474051087" dependencies = [ - "const-oid", + "const-oid 0.5.2", "typenum", ] +[[package]] +name = "der" +version = "0.5.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +dependencies = [ + "const-oid 0.7.1", +] + [[package]] name = "derivative" version = "2.2.0" @@ -1286,7 +1307,7 @@ checksum = "ed8f54486179d5a7f11e1f5526f49d925a411a96c1141a707bd5f071be2ab630" dependencies = [ "aes", "aes-gcm", - "arrayvec 0.7.2", + "arrayvec", "digest 0.10.3", "enr", "fnv", @@ -1299,7 +1320,7 @@ dependencies = [ "lru", "parking_lot 0.11.2", "rand 0.8.5", - "rlp 0.5.1", + "rlp", "sha2 0.9.9", "smallvec", "tokio", @@ -1307,7 +1328,7 @@ dependencies = [ "tokio-util", "tracing", "tracing-subscriber", - "uint 0.9.3", + "uint", "zeroize", ] @@ -1323,12 +1344,24 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34d33b390ab82f2e1481e331dbd0530895640179d2128ef9a79cc690b78d1eba" dependencies = [ - "der", - "elliptic-curve", + "der 0.3.5", + "elliptic-curve 0.9.12", "hmac 0.11.0", "signature", ] +[[package]] +name = "ecdsa" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +dependencies = [ + "der 0.5.1", + "elliptic-curve 0.11.12", + "rfc6979", + "signature", +] + [[package]] name = "ed25519" version = "1.4.0" @@ -1363,7 +1396,7 @@ dependencies = [ "compare_fields_derive", "derivative", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "ethereum-types 0.12.1", "fork_choice", "fs2", @@ -1397,15 +1430,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13e9b0c3c4170dcc2a12783746c4205d98e18957f57854251eea3f9750fe005" dependencies = [ "bitvec 0.20.4", - "ff", + "ff 0.9.0", "generic-array", - "group", - "pkcs8", + "group 0.9.0", + "pkcs8 0.6.1", "rand_core 0.6.3", "subtle", "zeroize", ] +[[package]] +name = "elliptic-curve" +version = "0.11.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" +dependencies = [ + "base16ct", + "crypto-bigint", + "der 0.5.1", + "ff 0.11.1", + "generic-array", + "group 0.11.0", + "rand_core 0.6.3", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encoding_rs" version = "0.8.30" @@ -1421,17 +1472,17 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "809869a1328bfb586b48c9c0f87761c47c41793a85bcb06f66074a87cafc1bcd" dependencies = [ - "base64 0.13.0", + "base64", "bs58", "bytes", "ed25519-dalek", "hex", - "k256", + "k256 0.8.1", "log", "rand 0.8.5", - "rlp 0.5.1", + "rlp", "serde", - "sha3", + "sha3 0.9.1", "zeroize", ] @@ -1508,14 +1559,14 @@ dependencies = [ "eth1_test_rig", "eth2", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "fallback", "futures", "hex", "lazy_static", "lighthouse_metrics", "merkle_proof", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "reqwest", "sensitive_url", "serde", @@ -1552,11 +1603,12 @@ dependencies = [ "eth2_keystore", "eth2_serde_utils", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "futures", "futures-util", - "libsecp256k1 0.6.0", + "libsecp256k1", "lighthouse_network", + "mime", "procinfo", "proto_array", "psutil", @@ -1578,18 +1630,6 @@ dependencies = [ "types", ] -[[package]] -name = "eth2_hashing" -version = "0.2.0" -dependencies = [ - "cpufeatures 0.1.5", - "lazy_static", - "ring", - "rustc-hex", - "sha2 0.9.9", - "wasm-bindgen-test", -] - [[package]] name = "eth2_hashing" version = "0.2.0" @@ -1602,13 +1642,25 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "eth2_hashing" +version = "0.3.0" +dependencies = [ + "cpufeatures 0.2.2", + "lazy_static", + "ring", + "rustc-hex", + "sha2 0.10.2", + "wasm-bindgen-test", +] + [[package]] name = 
"eth2_interop_keypairs" version = "0.2.0" dependencies = [ - "base64 0.13.0", + "base64", "bls", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "hex", "lazy_static", "num-bigint", @@ -1639,7 +1691,7 @@ dependencies = [ "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand 0.7.3", + "rand 0.8.5", "scrypt", "serde", "serde_json", @@ -1679,7 +1731,7 @@ dependencies = [ name = "eth2_ssz" version = "0.4.1" dependencies = [ - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "ethereum-types 0.12.1", "itertools", "smallvec", @@ -1695,18 +1747,6 @@ dependencies = [ "syn", ] -[[package]] -name = "eth2_ssz_derive" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "635b86d2c941bb71e7419a571e1763d65c93e51a1bafc400352e3bef6ff59fc9" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "eth2_ssz_types" version = "0.2.2" @@ -1732,7 +1772,7 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand 0.7.3", + "rand 0.8.5", "serde", "serde_json", "serde_repr", @@ -1752,45 +1792,34 @@ dependencies = [ [[package]] name = "ethabi" -version = "12.0.0" +version = "16.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052a565e3de82944527d6d10a465697e6bb92476b772ca7141080c901f6a63c6" +checksum = "a4c98847055d934070b90e806e12d3936b787d0a115068981c1d8dfd5dfef5a5" dependencies = [ - "ethereum-types 0.9.2", - "rustc-hex", + "ethereum-types 0.12.1", + "hex", "serde", "serde_json", - "tiny-keccak 1.5.0", - "uint 0.8.5", + "sha3 0.9.1", + "thiserror", + "uint", ] [[package]] name = "ethabi" -version = "14.1.0" +version = "17.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01317735d563b3bad2d5f90d2e1799f414165408251abb762510f40e790e69a" +checksum = "b69517146dfab88e9238c00c724fd8e277951c3cc6f22b016d72f422a832213e" dependencies = [ - 
"anyhow", - "ethereum-types 0.11.0", + "ethereum-types 0.13.1", "hex", + "once_cell", + "regex", "serde", "serde_json", - "sha3", + "sha3 0.10.1", "thiserror", - "uint 0.9.3", -] - -[[package]] -name = "ethbloom" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a6567e6fd35589fea0c63b94b4cf2e55573e413901bdbe60ab15cf0e25e5df" -dependencies = [ - "crunchy", - "fixed-hash 0.6.1", - "impl-rlp 0.2.1", - "impl-serde", - "tiny-keccak 2.0.2", + "uint", ] [[package]] @@ -1800,38 +1829,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8" dependencies = [ "crunchy", - "fixed-hash 0.7.0", - "impl-rlp 0.3.0", + "fixed-hash", + "impl-rlp", "impl-serde", - "tiny-keccak 2.0.2", + "tiny-keccak", ] [[package]] -name = "ethereum-types" -version = "0.9.2" +name = "ethbloom" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473aecff686bd8e7b9db0165cbbb53562376b39bf35b427f0c60446a9e1634b0" +checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" dependencies = [ - "ethbloom 0.9.2", - "fixed-hash 0.6.1", - "impl-rlp 0.2.1", + "crunchy", + "fixed-hash", + "impl-rlp", "impl-serde", - "primitive-types 0.7.3", - "uint 0.8.5", -] - -[[package]] -name = "ethereum-types" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64b5df66a228d85e4b17e5d6c6aa43b0310898ffe8a85988c4c032357aaabfd" -dependencies = [ - "ethbloom 0.11.1", - "fixed-hash 0.7.0", - "impl-rlp 0.3.0", - "impl-serde", - "primitive-types 0.9.1", - "uint 0.9.3", + "tiny-keccak", ] [[package]] @@ -1841,11 +1855,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" dependencies = [ "ethbloom 0.11.1", - "fixed-hash 0.7.0", - "impl-rlp 0.3.0", + "fixed-hash", + "impl-rlp", 
"impl-serde", "primitive-types 0.10.1", - "uint 0.9.3", + "uint", +] + +[[package]] +name = "ethereum-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" +dependencies = [ + "ethbloom 0.12.1", + "fixed-hash", + "impl-rlp", + "impl-serde", + "primitive-types 0.11.1", + "uint", +] + +[[package]] +name = "ethers-core" +version = "0.6.0" +source = "git+https://github.com/gakonst/ethers-rs?rev=02ad93a1cfb7b62eb051c77c61dc4c0218428e4a#02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" +dependencies = [ + "arrayvec", + "bytes", + "elliptic-curve 0.11.12", + "ethabi 17.0.0", + "generic-array", + "hex", + "k256 0.10.4", + "rand 0.8.5", + "rlp", + "rlp-derive", + "serde", + "serde_json", + "thiserror", + "tiny-keccak", ] [[package]] @@ -1875,6 +1924,7 @@ dependencies = [ "eth1", "eth2_serde_utils", "eth2_ssz_types", + "ethers-core", "exit-future", "futures", "hex", @@ -1882,8 +1932,8 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lru", - "parking_lot 0.11.2", - "rand 0.7.3", + "parking_lot 0.12.0", + "rand 0.8.5", "reqwest", "sensitive_url", "serde", @@ -1896,7 +1946,7 @@ dependencies = [ "tree_hash", "tree_hash_derive", "types", - "warp 0.3.0", + "warp", "zeroize", ] @@ -1948,6 +1998,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" +dependencies = [ + "rand_core 0.6.3", + "subtle", +] + [[package]] name = "ffi-opaque" version = "2.0.1" @@ -1972,18 +2032,6 @@ dependencies = [ "windows-acl", ] -[[package]] -name = "fixed-hash" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11498d382790b7a8f2fd211780bec78619bba81cdad3a283997c0c41f836759c" -dependencies = [ - "byteorder", - "rand 0.7.3", - "rustc-hex", - "static_assertions", -] - 
[[package]] name = "fixed-hash" version = "0.7.0" @@ -2042,7 +2090,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "proto_array", "store", "types", @@ -2080,6 +2128,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.3.21" @@ -2204,7 +2258,7 @@ dependencies = [ "environment", "eth1", "eth1_test_rig", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "eth2_ssz", "futures", "int_to_bytes", @@ -2292,7 +2346,18 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61b3c1e8b4f1ca07e6605ea1be903a5f6956aec5c8a67fd44d56076631675ed8" dependencies = [ - "ff", + "ff 0.9.0", + "rand_core 0.6.3", + "subtle", +] + +[[package]] +name = "group" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" +dependencies = [ + "ff 0.11.1", "rand_core 0.6.3", "subtle", ] @@ -2355,7 +2420,7 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" dependencies = [ - "base64 0.13.0", + "base64", "bitflags", "bytes", "headers-core", @@ -2494,14 +2559,16 @@ dependencies = [ "eth1", "eth2", "eth2_ssz", + "execution_layer", "futures", "hex", "lazy_static", "lighthouse_metrics", "lighthouse_network", "lighthouse_version", + "logging", "network", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "safe_arith", "sensitive_url", 
"serde", @@ -2509,11 +2576,12 @@ dependencies = [ "slot_clock", "state_processing", "store", + "task_executor", "tokio", "tokio-stream", "tree_hash", "types", - "warp 0.3.2", + "warp", "warp_utils", ] @@ -2534,7 +2602,7 @@ dependencies = [ "store", "tokio", "types", - "warp 0.3.2", + "warp", "warp_utils", ] @@ -2654,15 +2722,6 @@ dependencies = [ "xmltree", ] -[[package]] -name = "impl-codec" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" -dependencies = [ - "parity-scale-codec 1.3.7", -] - [[package]] name = "impl-codec" version = "0.5.1" @@ -2673,12 +2732,12 @@ dependencies = [ ] [[package]] -name = "impl-rlp" -version = "0.2.1" +name = "impl-codec" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f7a72f11830b52333f36e3b09a288333888bf54380fd0ac0790a3c31ab0f3c5" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "rlp 0.4.6", + "parity-scale-codec 3.1.2", ] [[package]] @@ -2687,7 +2746,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" dependencies = [ - "rlp 0.5.1", + "rlp", ] [[package]] @@ -2720,15 +2779,6 @@ dependencies = [ "hashbrown", ] -[[package]] -name = "input_buffer" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" -dependencies = [ - "bytes", -] - [[package]] name = "instant" version = "0.1.12" @@ -2855,7 +2905,7 @@ version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "012bb02250fdd38faa5feee63235f7a459974440b9b57593822414c31f92839e" dependencies = [ - "base64 0.13.0", + "base64", "pem", "ring", "serde", @@ -2870,11 +2920,24 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3e8e491ed22bc161583a1c77e42313672c483eba6bd9d7afec0f1131d0b9ce" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", + "ecdsa 0.11.1", + "elliptic-curve 0.9.12", "sha2 0.9.9", ] +[[package]] +name = "k256" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +dependencies = [ + "cfg-if", + "ecdsa 0.13.4", + "elliptic-curve 0.11.12", + "sec1", + "sha3 0.9.1", +] + [[package]] name = "keccak" version = "0.1.0" @@ -2898,7 +2961,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.1.5" +version = "2.2.1" dependencies = [ "account_utils", "bls", @@ -3059,7 +3122,7 @@ dependencies = [ "futures-timer", "instant", "lazy_static", - "libsecp256k1 0.7.0", + "libsecp256k1", "log", "multiaddr 0.13.0", "multihash 0.14.0", @@ -3094,7 +3157,7 @@ dependencies = [ "futures-timer", "instant", "lazy_static", - "libsecp256k1 0.7.0", + "libsecp256k1", "log", "multiaddr 0.14.0", "multihash 0.16.1", @@ -3134,7 +3197,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f62943fba0b0dae02b87868620c52a581c54ec9fb04b5e195cf20313fc510c3" dependencies = [ "asynchronous-codec", - "base64 0.13.0", + "base64", "byteorder", "bytes", "fnv", @@ -3302,7 +3365,7 @@ dependencies = [ "log", "quicksink", "rw-stream-sink", - "soketto 0.7.1", + "soketto", "url", "webpki-roots", ] @@ -3320,25 +3383,6 @@ dependencies = [ "yamux", ] -[[package]] -name = "libsecp256k1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" -dependencies = [ - "arrayref", - "base64 0.12.3", - "digest 0.9.0", - "hmac-drbg", - "libsecp256k1-core 0.2.2", - "libsecp256k1-gen-ecmult 0.2.1", - "libsecp256k1-gen-genmult 0.2.1", - "rand 0.7.3", - 
"serde", - "sha2 0.9.9", - "typenum", -] - [[package]] name = "libsecp256k1" version = "0.7.0" @@ -3346,29 +3390,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" dependencies = [ "arrayref", - "base64 0.13.0", + "base64", "digest 0.9.0", "hmac-drbg", - "libsecp256k1-core 0.3.0", - "libsecp256k1-gen-ecmult 0.3.0", - "libsecp256k1-gen-genmult 0.3.0", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", "rand 0.8.5", "serde", "sha2 0.9.9", "typenum", ] -[[package]] -name = "libsecp256k1-core" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - [[package]] name = "libsecp256k1-core" version = "0.3.0" @@ -3380,31 +3413,13 @@ dependencies = [ "subtle", ] -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccab96b584d38fac86a83f07e659f0deafd0253dc096dab5a36d53efe653c5c3" -dependencies = [ - "libsecp256k1-core 0.2.2", -] - [[package]] name = "libsecp256k1-gen-ecmult" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" dependencies = [ - "libsecp256k1-core 0.3.0", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67abfe149395e3aa1c48a2beb32b068e2334402df8181f818d3aee2b304c4f5d" -dependencies = [ - "libsecp256k1-core 0.2.2", + "libsecp256k1-core", ] [[package]] @@ -3413,7 +3428,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" dependencies = [ - 
"libsecp256k1-core 0.3.0", + "libsecp256k1-core", ] [[package]] @@ -3440,7 +3455,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.1.5" +version = "2.2.1" dependencies = [ "account_manager", "account_utils", @@ -3453,7 +3468,7 @@ dependencies = [ "directory", "env_logger 0.9.0", "environment", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "eth2_network_config", "futures", "lazy_static", @@ -3494,7 +3509,7 @@ dependencies = [ "discv5", "error-chain", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "eth2_ssz_types", "exit-future", "fnv", @@ -3506,9 +3521,9 @@ dependencies = [ "lighthouse_metrics", "lighthouse_version", "lru", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "prometheus-client", - "rand 0.7.3", + "rand 0.8.5", "regex", "serde", "serde_derive", @@ -3518,11 +3533,11 @@ dependencies = [ "slog-term", "smallvec", "snap", - "strum 0.21.0", + "strum", "superstruct", "task_executor", "tempfile", - "tiny-keccak 2.0.2", + "tiny-keccak", "tokio", "tokio-io-timeout", "tokio-util", @@ -3627,7 +3642,7 @@ dependencies = [ "lazy_static", "libc", "lighthouse_metrics", - "parking_lot 0.11.2", + "parking_lot 0.12.0", ] [[package]] @@ -3688,7 +3703,7 @@ dependencies = [ name = "merkle_proof" version = "0.2.0" dependencies = [ - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "ethereum-types 0.12.1", "lazy_static", "quickcheck 0.9.2", @@ -3714,9 +3729,9 @@ version = "0.1.0" source = "git+https://github.com/sigp/milhouse?branch=main#30c87f256deacc381fb15e39ccc4a281445f98da" dependencies = [ "derivative", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.2.0", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "itertools", "parking_lot 0.11.2", "rayon", @@ -3896,24 
+3911,6 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" -[[package]] -name = "multipart" -version = "0.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050aeedc89243f5347c3e237e3e13dc76fbe4ae3742a57b94dc14f69acf76d4" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand 0.7.3", - "safemem", - "tempfile", - "twoway", -] - [[package]] name = "multipart" version = "0.18.0" @@ -3983,6 +3980,7 @@ name = "network" version = "0.2.0" dependencies = [ "beacon_chain", + "derivative", "environment", "error-chain", "eth2_ssz", @@ -4003,8 +4001,8 @@ dependencies = [ "lru_cache", "matches", "num_cpus", - "rand 0.7.3", - "rlp 0.5.1", + "rand 0.8.5", + "rlp", "slog", "slog-async", "slog-term", @@ -4012,7 +4010,7 @@ dependencies = [ "slot_clock", "smallvec", "store", - "strum 0.21.0", + "strum", "task_executor", "tokio", "tokio-stream", @@ -4020,19 +4018,6 @@ dependencies = [ "types", ] -[[package]] -name = "nix" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77d9f3521ea8e0641a153b3cddaf008dcbf26acd4ed739a2517295e0760d12c7" -dependencies = [ - "bitflags", - "cc", - "cfg-if", - "libc", - "memoffset", -] - [[package]] name = "nix" version = "0.23.1" @@ -4248,11 +4233,11 @@ dependencies = [ "beacon_chain", "derivative", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "itertools", "lazy_static", "lighthouse_metrics", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "rayon", "serde", "serde_derive", @@ -4272,27 +4257,29 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "1.3.7" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4b26b16c7687c3075982af47719e481815df30bc544f7a6690763a25ca16e9d" +checksum 
= "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" dependencies = [ - "arrayvec 0.5.2", - "bitvec 0.17.4", - "byte-slice-cast 0.3.5", + "arrayvec", + "bitvec 0.20.4", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive 2.3.1", "serde", ] [[package]] name = "parity-scale-codec" -version = "2.3.1" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +checksum = "e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066" dependencies = [ - "arrayvec 0.7.2", - "bitvec 0.20.4", - "byte-slice-cast 1.2.1", + "arrayvec", + "bitvec 1.0.0", + "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive", + "parity-scale-codec-derive 3.1.2", "serde", ] @@ -4308,6 +4295,18 @@ dependencies = [ "syn", ] +[[package]] +name = "parity-scale-codec-derive" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c45ed1f39709f5a89338fab50e59816b2e8815f5bb58276e7ddf9afd495f73f8" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "parking_lot" version = "0.11.2" @@ -4392,7 +4391,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9a3b09a20e374558580a4914d3b7d89bd61b954a5a5e1dcbea98753addb1947" dependencies = [ - "base64 0.13.0", + "base64", ] [[package]] @@ -4484,8 +4483,19 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9c2f795bc591cb3384cb64082a578b89207ac92bb89c9d98c1ea2ace7cd8110" dependencies = [ - "der", - "spki", + "der 0.3.5", + "spki 0.3.0", +] + +[[package]] +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +dependencies = [ + "der 0.5.1", + "spki 0.5.4", + "zeroize", ] [[package]] @@ -4496,9 
+4506,9 @@ checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "platforms" -version = "1.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" +checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" @@ -4534,7 +4544,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ - "cpufeatures 0.2.1", + "cpufeatures 0.2.2", "opaque-debug", "universal-hash", ] @@ -4546,7 +4556,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" dependencies = [ "cfg-if", - "cpufeatures 0.2.1", + "cpufeatures 0.2.2", "opaque-debug", "universal-hash", ] @@ -4557,43 +4567,30 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" -[[package]] -name = "primitive-types" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd39dcacf71411ba488570da7bbc89b717225e46478b30ba99b92db6b149809" -dependencies = [ - "fixed-hash 0.6.1", - "impl-codec 0.4.2", - "impl-rlp 0.2.1", - "impl-serde", - "uint 0.8.5", -] - -[[package]] -name = "primitive-types" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06345ee39fbccfb06ab45f3a1a5798d9dafa04cb8921a76d227040003a234b0e" -dependencies = [ - "fixed-hash 0.7.0", - "impl-codec 0.5.1", - "impl-rlp 0.3.0", - "impl-serde", - "uint 0.9.3", -] - [[package]] name = "primitive-types" version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" 
dependencies = [ - "fixed-hash 0.7.0", + "fixed-hash", "impl-codec 0.5.1", - "impl-rlp 0.3.0", + "impl-rlp", "impl-serde", - "uint 0.9.3", + "uint", +] + +[[package]] +name = "primitive-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +dependencies = [ + "fixed-hash", + "impl-codec 0.6.0", + "impl-rlp", + "impl-serde", + "uint", ] [[package]] @@ -4753,7 +4750,7 @@ name = "proto_array" version = "0.2.0" dependencies = [ "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "serde", "serde_derive", "serde_yaml", @@ -4768,15 +4765,16 @@ checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" [[package]] name = "psutil" -version = "3.2.1" -source = "git+https://github.com/sigp/rust-psutil?rev=b3e44bc7ec5d545b8cb8ad4e3dffe074b6e6336b#b3e44bc7ec5d545b8cb8ad4e3dffe074b6e6336b" +version = "3.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f866af2b0f8e4b0d2d00aad8a9c5fc48fad33466cd99a64cbb3a4c1505f1a62d" dependencies = [ "cfg-if", "darwin-libproc", "derive_more", "glob", "mach", - "nix 0.21.2", + "nix", "num_cpus", "once_cell", "platforms", @@ -4863,24 +4861,18 @@ dependencies = [ "rusqlite", ] -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - -[[package]] -name = "radium" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8" - [[package]] name = "radium" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +[[package]] +name = "radium" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.7.3" @@ -4892,7 +4884,6 @@ dependencies = [ "rand_chacha 0.2.2", "rand_core 0.5.1", "rand_hc", - "rand_pcg", ] [[package]] @@ -4953,22 +4944,13 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_pcg" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - [[package]] name = "rand_xorshift" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77d416b86801d23dde1aa643023b775c3a462efc0ed96443add11546cdf1dca8" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.5.1", + "rand_core 0.6.3", ] [[package]] @@ -5056,7 +5038,7 @@ version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" dependencies = [ - "base64 0.13.0", + "base64", "bytes", "encoding_rs", "futures-core", @@ -5097,6 +5079,17 @@ dependencies = [ "quick-error", ] +[[package]] +name = "rfc6979" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +dependencies = [ + "crypto-bigint", + "hmac 0.11.0", + "zeroize", +] + [[package]] name = "ring" version = "0.16.20" @@ -5118,15 +5111,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" -[[package]] -name = "rlp" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1190dcc8c3a512f1eef5d09bb8c84c7f39e1054e174d1795482e18f5272f2e73" 
-dependencies = [ - "rustc-hex", -] - [[package]] name = "rlp" version = "0.5.1" @@ -5137,6 +5121,17 @@ dependencies = [ "rustc-hex", ] +[[package]] +name = "rlp-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "rpassword" version = "5.0.1" @@ -5222,7 +5217,7 @@ version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64 0.13.0", + "base64", "log", "ring", "sct 0.6.1", @@ -5356,10 +5351,23 @@ dependencies = [ ] [[package]] -name = "secp256k1" -version = "0.20.3" +name = "sec1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" +checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +dependencies = [ + "der 0.5.1", + "generic-array", + "pkcs8 0.8.0", + "subtle", + "zeroize", +] + +[[package]] +name = "secp256k1" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" dependencies = [ "secp256k1-sys", ] @@ -5537,7 +5545,7 @@ checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ "block-buffer 0.9.0", "cfg-if", - "cpufeatures 0.2.1", + "cpufeatures 0.2.2", "digest 0.9.0", "opaque-debug", ] @@ -5549,7 +5557,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", - "cpufeatures 0.2.1", + "cpufeatures 0.2.2", "digest 0.10.3", ] @@ -5561,7 +5569,7 @@ checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ 
"block-buffer 0.9.0", "cfg-if", - "cpufeatures 0.2.1", + "cpufeatures 0.2.2", "digest 0.9.0", "opaque-debug", ] @@ -5573,7 +5581,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" dependencies = [ "cfg-if", - "cpufeatures 0.2.1", + "cpufeatures 0.2.2", "digest 0.10.3", ] @@ -5589,6 +5597,16 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha3" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86" +dependencies = [ + "digest 0.10.3", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -5645,7 +5663,7 @@ dependencies = [ "eth1_test_rig", "futures", "node_test_rig", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "rayon", "sensitive_url", "tokio", @@ -5665,7 +5683,7 @@ dependencies = [ "bincode", "byteorder", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "filesystem", "flate2", "lazy_static", @@ -5674,8 +5692,8 @@ dependencies = [ "logging", "lru", "maplit", - "parking_lot 0.11.2", - "rand 0.7.3", + "parking_lot 0.12.0", + "rand 0.8.5", "rayon", "safe_arith", "serde", @@ -5829,7 +5847,7 @@ version = "0.2.0" dependencies = [ "lazy_static", "lighthouse_metrics", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "types", ] @@ -5883,28 +5901,13 @@ dependencies = [ "winapi", ] -[[package]] -name = "soketto" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4919971d141dbadaa0e82b5d369e2d7666c98e4625046140615ca363e50d4daa" -dependencies = [ - "base64 0.13.0", - "bytes", - "futures", - "httparse", - "log", - "rand 0.8.5", - "sha-1 0.9.8", -] - [[package]] name = "soketto" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ - "base64 0.13.0", + "base64", "bytes", "flate2", "futures", @@ -5926,7 +5929,17 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dae7e047abc519c96350e9484a96c6bf1492348af912fd3446dd2dc323f6268" dependencies = [ - "der", + "der 0.3.5", +] + +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der 0.5.1", ] [[package]] @@ -5943,7 +5956,7 @@ dependencies = [ "beacon_chain", "bls", "env_logger 0.9.0", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "eth2_ssz", "eth2_ssz_types", "int_to_bytes", @@ -5985,20 +5998,20 @@ dependencies = [ "db-key", "directory", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "itertools", "lazy_static", "leveldb", "lighthouse_metrics", "lru", - "parking_lot 0.11.2", + "parking_lot 0.12.0", "safe_arith", "serde", "serde_derive", "slog", "sloggers", "state_processing", - "strum 0.24.0", + "strum", "take-until", "tempfile", "tree_hash", @@ -6018,34 +6031,13 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" -[[package]] -name = "strum" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2" -dependencies = [ - "strum_macros 0.21.1", -] - [[package]] name = "strum" version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e96acfc1b70604b8b2f1ffa4c57e59176c7dbb05d556c71ecd2f5498a1dee7f8" dependencies = [ - "strum_macros 0.24.0", -] - -[[package]] -name = 
"strum_macros" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec" -dependencies = [ - "heck 0.3.3", - "proc-macro2", - "quote", - "syn", + "strum_macros", ] [[package]] @@ -6069,14 +6061,15 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "superstruct" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e623e69a04a6352677c1f892027e14e034dfc6c4aabed0a4a0be9c1a0a46cee" +checksum = "95a99807a055ff4ff5d249bb84c80d9eabb55ca3c452187daae43fd5b51ef695" dependencies = [ "darling", "itertools", "proc-macro2", "quote", + "smallvec", "syn", ] @@ -6085,7 +6078,7 @@ name = "swap_or_not_shuffle" version = "0.2.0" dependencies = [ "criterion", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "ethereum-types 0.12.1", ] @@ -6152,6 +6145,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "slog", + "sloggers", "tokio", ] @@ -6311,15 +6305,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "tiny-keccak" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2" -dependencies = [ - "crunchy", -] - [[package]] name = "tiny-keccak" version = "2.0.2" @@ -6428,19 +6413,6 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "tokio-tungstenite" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" -dependencies = [ - "futures-util", - "log", - "pin-project 1.0.10", - "tokio", - "tungstenite 0.12.0", -] - [[package]] name = "tokio-tungstenite" version = "0.15.0" @@ -6451,7 +6423,7 @@ dependencies = [ "log", "pin-project 1.0.10", "tokio", - "tungstenite 0.14.0", + "tungstenite", ] 
[[package]] @@ -6519,16 +6491,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project 1.0.10", - "tracing", -] - [[package]] name = "tracing-log" version = "0.1.2" @@ -6582,11 +6544,11 @@ name = "tree_hash" version = "0.4.1" dependencies = [ "beacon_chain", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "ethereum-types 0.12.1", - "rand 0.7.3", + "rand 0.8.5", "smallvec", "tree_hash_derive", "types", @@ -6663,32 +6625,13 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "tungstenite" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" -dependencies = [ - "base64 0.13.0", - "byteorder", - "bytes", - "http", - "httparse", - "input_buffer", - "log", - "rand 0.8.5", - "sha-1 0.9.8", - "url", - "utf-8", -] - [[package]] name = "tungstenite" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" dependencies = [ - "base64 0.13.0", + "base64", "byteorder", "bytes", "http", @@ -6728,11 +6671,11 @@ dependencies = [ "compare_fields_derive", "criterion", "derivative", - "eth2_hashing 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.3.0", "eth2_interop_keypairs", "eth2_serde_utils", "eth2_ssz", - "eth2_ssz_derive 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz_derive", "eth2_ssz_types", 
"ethereum-types 0.12.1", "hex", @@ -6741,8 +6684,8 @@ dependencies = [ "lazy_static", "log", "milhouse", - "parking_lot 0.11.2", - "rand 0.7.3", + "parking_lot 0.12.0", + "rand 0.8.5", "rand_xorshift", "rayon", "regex", @@ -6770,18 +6713,6 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" -[[package]] -name = "uint" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9db035e67dfaf7edd9aebfe8676afcd63eed53c8a4044fed514c8cccf1835177" -dependencies = [ - "byteorder", - "crunchy", - "rustc-hex", - "static_assertions", -] - [[package]] name = "uint" version = "0.9.3" @@ -6934,14 +6865,14 @@ dependencies = [ "hyper", "itertools", "lazy_static", - "libsecp256k1 0.6.0", + "libsecp256k1", "lighthouse_metrics", "lighthouse_version", "lockfile", "logging", "monitoring_api", - "parking_lot 0.11.2", - "rand 0.7.3", + "parking_lot 0.12.0", + "rand 0.8.5", "reqwest", "ring", "safe_arith", @@ -6959,7 +6890,7 @@ dependencies = [ "types", "url", "validator_dir", - "warp 0.3.2", + "warp", "warp_utils", ] @@ -6974,7 +6905,7 @@ dependencies = [ "filesystem", "hex", "lockfile", - "rand 0.7.3", + "rand 0.8.5", "tempfile", "tree_hash", "types", @@ -7031,40 +6962,10 @@ dependencies = [ "try-lock", ] -[[package]] -name = "warp" -version = "0.3.0" -source = "git+https://github.com/macladson/warp?rev=dfa259e#dfa259e19b7490e6bc4bf247e8b76f671d29a0eb" -dependencies = [ - "bytes", - "futures", - "headers", - "http", - "hyper", - "log", - "mime", - "mime_guess", - "multipart 0.17.1", - "percent-encoding", - "pin-project 1.0.10", - "scoped-tls", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-rustls", - "tokio-stream", - "tokio-tungstenite 0.13.0", - "tokio-util", - "tower-service", - "tracing", - "tracing-futures", -] - [[package]] name = "warp" version = "0.3.2" -source = 
"git+https://github.com/macladson/warp?rev=7e75acc#7e75acc368229a46a236a8c991bf251fe7fe50ef" +source = "git+https://github.com/macladson/warp?rev=7e75acc368229a46a236a8c991bf251fe7fe50ef#7e75acc368229a46a236a8c991bf251fe7fe50ef" dependencies = [ "bytes", "futures-channel", @@ -7075,7 +6976,7 @@ dependencies = [ "log", "mime", "mime_guess", - "multipart 0.18.0", + "multipart", "percent-encoding", "pin-project 1.0.10", "scoped-tls", @@ -7085,7 +6986,7 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-stream", - "tokio-tungstenite 0.15.0", + "tokio-tungstenite", "tokio-util", "tower-service", "tracing", @@ -7106,7 +7007,7 @@ dependencies = [ "state_processing", "tokio", "types", - "warp 0.3.2", + "warp", ] [[package]] @@ -7238,31 +7139,33 @@ dependencies = [ [[package]] name = "web3" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd24abe6f2b68e0677f843059faea87bcbd4892e39f02886f366d8222c3c540d" +checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" dependencies = [ - "arrayvec 0.5.2", - "base64 0.13.0", + "arrayvec", + "base64", "bytes", "derive_more", - "ethabi 14.1.0", - "ethereum-types 0.11.0", + "ethabi 16.0.0", + "ethereum-types 0.12.1", "futures", "futures-timer", "headers", "hex", + "idna", "jsonrpc-core", "log", - "parking_lot 0.11.2", + "once_cell", + "parking_lot 0.12.0", "pin-project 1.0.10", "reqwest", - "rlp 0.5.1", + "rlp", "secp256k1", "serde", "serde_json", - "soketto 0.5.0", - "tiny-keccak 2.0.2", + "soketto", + "tiny-keccak", "tokio", "tokio-util", "url", @@ -7462,6 +7365,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +[[package]] +name = "wyz" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e" +dependencies = [ + "tap", +] + 
[[package]] name = "x25519-dalek" version = "1.1.1" diff --git a/Cargo.toml b/Cargo.toml index 46893a7e78..f031a09d37 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,9 +91,11 @@ members = [ [patch] [patch.crates-io] fixed-hash = { git = "https://github.com/paritytech/parity-common", rev="df638ab0885293d21d656dc300d39236b69ce57d" } -warp = { git = "https://github.com/macladson/warp", rev ="7e75acc" } +warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } eth2_ssz = { path = "consensus/ssz" } +eth2_ssz_derive = { path = "consensus/ssz_derive" } eth2_ssz_types = { path = "consensus/ssz_types" } +eth2_hashing = { path = "crypto/eth2_hashing" } tree_hash = { path = "consensus/tree_hash" } tree_hash_derive = { path = "consensus/tree_hash_derive" } eth2_serde_utils = { path = "consensus/serde_utils" } diff --git a/README.md b/README.md index acf5f5926d..aa3cc020e1 100644 --- a/README.md +++ b/README.md @@ -2,10 +2,8 @@ An open-source Ethereum consensus client, written in Rust and maintained by Sigma Prime. -[![Build Status]][Build Link] [![Book Status]][Book Link] [![Chat Badge]][Chat Link] +[![Book Status]][Book Link] [![Chat Badge]][Chat Link] -[Build Status]: https://github.com/sigp/lighthouse/workflows/test-suite/badge.svg?branch=stable -[Build Link]: https://github.com/sigp/lighthouse/actions [Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da [Chat Link]: https://discord.gg/cyAszAh [Book Status]:https://img.shields.io/badge/user--docs-unstable-informational @@ -43,7 +41,7 @@ as the canonical staking deposit contract address. The [Lighthouse Book](https://lighthouse-book.sigmaprime.io) contains information for users and developers. -The Lighthouse team maintains a blog at [lighthouse.sigmaprime.io][blog] which contains periodical +The Lighthouse team maintains a blog at [lighthouse-blog.sigmaprime.io][blog] which contains periodical progress updates, roadmap insights and interesting findings. 
## Branches diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index e56a70472c..f25bbd8159 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -158,7 +158,7 @@ pub fn cli_run( InterchangeImportOutcome::Success { pubkey, summary } => { eprintln!("- {:?}", pubkey); eprintln!( - " - latest block: {}", + " - latest proposed block: {}", display_slot(summary.max_block_slot) ); eprintln!( diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 92b7356c55..986ff7a615 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.1.5" +version = "2.2.1" authors = ["Paul Hauner ", "Age Manning ; @@ -331,6 +335,10 @@ pub struct BeaconChain { /// A state-machine that is updated with information from the network and chooses a canonical /// head block. pub fork_choice: RwLock>, + /// Transmitter used to indicate that slot-start fork choice has completed running. + pub fork_choice_signal_tx: Option, + /// Receiver used by block production to wait on slot-start fork choice. + pub fork_choice_signal_rx: Option, /// A handler for events generated by the beacon chain. This is only initialized when the /// HTTP server is enabled. pub event_handler: Option>, @@ -363,7 +371,7 @@ pub struct BeaconChain { pub validator_monitor: RwLock>, } -type BeaconBlockAndState = (BeaconBlock, BeaconState); +type BeaconBlockAndState = (BeaconBlock, BeaconState); impl BeaconChain { /// Persists the head tracker and fork choice. @@ -575,7 +583,7 @@ impl BeaconChain { block_root: Hash256, ) -> Result> + '_, Error> { let block = self - .get_block(&block_root)? + .get_blinded_block(&block_root)? .ok_or(Error::MissingBeaconBlock(block_root))?; let state = self .get_state(&block.state_root(), Some(block.slot()))? 
@@ -735,11 +743,11 @@ impl BeaconChain { &self, request_slot: Slot, skips: WhenSlotSkipped, - ) -> Result>, Error> { + ) -> Result>, Error> { let root = self.block_root_at_slot(request_slot, skips)?; if let Some(block_root) = root { - Ok(self.store.get_block(&block_root)?) + Ok(self.store.get_blinded_block(&block_root)?) } else { Ok(None) } @@ -944,16 +952,14 @@ impl BeaconChain { /// ## Errors /// /// May return a database error. - pub fn get_block_checking_early_attester_cache( + pub async fn get_block_checking_early_attester_cache( &self, block_root: &Hash256, ) -> Result>, Error> { - let block_opt = self - .store - .get_block(block_root)? - .or_else(|| self.early_attester_cache.get_block(*block_root)); - - Ok(block_opt) + if let Some(block) = self.early_attester_cache.get_block(*block_root) { + return Ok(Some(block)); + } + self.get_block(block_root).await } /// Returns the block at the given root, if any. @@ -961,11 +967,69 @@ impl BeaconChain { /// ## Errors /// /// May return a database error. - pub fn get_block( + pub async fn get_block( &self, block_root: &Hash256, ) -> Result>, Error> { - Ok(self.store.get_block(block_root)?) + // Load block from database, returning immediately if we have the full block w payload + // stored. + let blinded_block = match self.store.try_get_full_block(block_root)? { + Some(DatabaseBlock::Full(block)) => return Ok(Some(block)), + Some(DatabaseBlock::Blinded(block)) => block, + None => return Ok(None), + }; + + // If we only have a blinded block, load the execution payload from the EL. + let block_message = blinded_block.message(); + let execution_payload_header = &block_message + .execution_payload() + .map_err(|_| Error::BlockVariantLacksExecutionPayload(*block_root))? + .execution_payload_header; + + let exec_block_hash = execution_payload_header.block_hash; + + let execution_payload = self + .execution_layer + .as_ref() + .ok_or(Error::ExecutionLayerMissing)? 
+ .get_payload_by_block_hash(exec_block_hash) + .await + .map_err(|e| Error::ExecutionLayerErrorPayloadReconstruction(exec_block_hash, e))? + .ok_or(Error::BlockHashMissingFromExecutionLayer(exec_block_hash))?; + + // Verify payload integrity. + let header_from_payload = ExecutionPayloadHeader::from(&execution_payload); + if header_from_payload != *execution_payload_header { + for txn in &execution_payload.transactions { + debug!( + self.log, + "Reconstructed txn"; + "bytes" => format!("0x{}", hex::encode(&**txn)), + ); + } + + return Err(Error::InconsistentPayloadReconstructed { + slot: blinded_block.slot(), + exec_block_hash, + canonical_payload_root: execution_payload_header.tree_hash_root(), + reconstructed_payload_root: header_from_payload.tree_hash_root(), + canonical_transactions_root: execution_payload_header.transactions_root, + reconstructed_transactions_root: header_from_payload.transactions_root, + }); + } + + // Add the payload to the block to form a full block. + blinded_block + .try_into_full_block(Some(execution_payload)) + .ok_or(Error::AddPayloadLogicError) + .map(Some) + } + + pub fn get_blinded_block( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + Ok(self.store.get_blinded_block(block_root)?) } /// Returns the state at the given root, if any. 
@@ -1131,7 +1195,7 @@ impl BeaconChain { .body() .execution_payload() .ok() - .map(|ep| ep.block_hash), + .map(|ep| ep.block_hash()), random, }) }) @@ -1409,8 +1473,13 @@ impl BeaconChain { pub fn get_aggregated_attestation( &self, data: &AttestationData, - ) -> Option> { - self.naive_aggregation_pool.read().get(data) + ) -> Result>, Error> { + if let Some(attestation) = self.naive_aggregation_pool.read().get(data) { + self.filter_optimistic_attestation(attestation) + .map(Option::Some) + } else { + Ok(None) + } } /// Returns an aggregated `Attestation`, if any, that has a matching @@ -1421,10 +1490,43 @@ impl BeaconChain { &self, slot: Slot, attestation_data_root: &Hash256, - ) -> Option> { - self.naive_aggregation_pool + ) -> Result>, Error> { + if let Some(attestation) = self + .naive_aggregation_pool .read() .get_by_slot_and_root(slot, attestation_data_root) + { + self.filter_optimistic_attestation(attestation) + .map(Option::Some) + } else { + Ok(None) + } + } + + /// Returns `Ok(attestation)` if the supplied `attestation` references a valid + /// `beacon_block_root`. + fn filter_optimistic_attestation( + &self, + attestation: Attestation, + ) -> Result, Error> { + let beacon_block_root = attestation.data.beacon_block_root; + match self + .fork_choice + .read() + .get_block_execution_status(&beacon_block_root) + { + // The attestation references a block that is not in fork choice, it must be + // pre-finalization. + None => Err(Error::CannotAttestToFinalizedBlock { beacon_block_root }), + // The attestation references a fully valid `beacon_block_root`. + Some(execution_status) if execution_status.is_valid_or_irrelevant() => Ok(attestation), + // The attestation references a block that has not been verified by an EL (i.e. it + // is optimistic or invalid). Don't return the block, return an error instead. 
+ Some(execution_status) => Err(Error::HeadBlockNotFullyVerified { + beacon_block_root, + execution_status, + }), + } } /// Return an aggregated `SyncCommitteeContribution` matching the given `root`. @@ -1460,6 +1562,8 @@ impl BeaconChain { // // In effect, the early attester cache prevents slow database IO from causing missed // head/target votes. + // + // The early attester cache should never contain an optimistically imported block. match self .early_attester_cache .try_attest(request_slot, request_index, &self.spec) @@ -1576,6 +1680,22 @@ impl BeaconChain { } drop(head_timer); + // Only attest to a block if it is fully verified (i.e. not optimistic or invalid). + match self + .fork_choice + .read() + .get_block_execution_status(&beacon_block_root) + { + Some(execution_status) if execution_status.is_valid_or_irrelevant() => (), + Some(execution_status) => { + return Err(Error::HeadBlockNotFullyVerified { + beacon_block_root, + execution_status, + }) + } + None => return Err(Error::HeadMissingFromForkChoice(beacon_block_root)), + }; + /* * Phase 2/2: * @@ -1636,64 +1756,6 @@ impl BeaconChain { }) } - /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to - /// `beacon_block_root`. The provided `state` should match the `block.state_root` for the - /// `block` identified by `beacon_block_root`. - /// - /// The attestation doesn't _really_ have anything about it that makes it unaggregated per say, - /// however this function is only required in the context of forming an unaggregated - /// attestation. It would be an (undetectable) violation of the protocol to create a - /// `SignedAggregateAndProof` based upon the output of this function. 
- pub fn produce_unaggregated_attestation_for_block( - &self, - slot: Slot, - index: CommitteeIndex, - beacon_block_root: Hash256, - mut state: Cow>, - state_root: Hash256, - ) -> Result, Error> { - let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); - - if state.slot() > slot { - return Err(Error::CannotAttestToFutureState); - } else if state.current_epoch() < epoch { - let mut_state = state.to_mut(); - // Only perform a "partial" state advance since we do not require the state roots to be - // accurate. - partial_state_advance( - mut_state, - Some(state_root), - epoch.start_slot(T::EthSpec::slots_per_epoch()), - &self.spec, - )?; - mut_state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - } - - let committee_len = state.get_beacon_committee(slot, index)?.committee.len(); - - let target_slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); - let target_root = if state.slot() <= target_slot { - beacon_block_root - } else { - *state.get_block_root(target_slot)? - }; - - Ok(Attestation { - aggregation_bits: BitList::with_capacity(committee_len)?, - data: AttestationData { - slot, - index, - beacon_block_root, - source: state.current_justified_checkpoint(), - target: Checkpoint { - epoch, - root: target_root, - }, - }, - signature: AggregateSignature::empty(), - }) - } - /// Performs the same validation as `Self::verify_unaggregated_attestation_for_gossip`, but for /// multiple attestations using batch BLS verification. Batch verification can provide /// significant CPU-time savings compared to individual verification. @@ -2190,7 +2252,7 @@ impl BeaconChain { /// This method is generally much more efficient than importing each block using /// `Self::process_block`. 
pub fn process_chain_segment( - &self, + self: &Arc, chain_segment: Vec>, ) -> ChainSegmentResult { let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len()); @@ -2384,7 +2446,7 @@ impl BeaconChain { /// Returns an `Err` if the given block was invalid, or an error was encountered during /// verification. pub fn process_block>( - &self, + self: &Arc, unverified_block: B, ) -> Result> { // Start the Prometheus timer. @@ -2657,13 +2719,20 @@ impl BeaconChain { } } - // If the block is recent enough, check to see if it becomes the head block. If so, apply it - // to the early attester cache. This will allow attestations to the block without waiting - // for the block and state to be inserted to the database. + // If the block is recent enough and it was not optimistically imported, check to see if it + // becomes the head block. If so, apply it to the early attester cache. This will allow + // attestations to the block without waiting for the block and state to be inserted to the + // database. // // Only performing this check on recent blocks avoids slowing down sync with lots of calls // to fork choice `get_head`. - if block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot { + // + // Optimistically imported blocks are not added to the cache since the cache is only useful + // for a small window of time and the complexity of keeping track of the optimistic status + // is not worth it. + if !payload_verification_status.is_optimistic() + && block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot + { let new_head_root = fork_choice .get_head(current_slot, &self.spec) .map_err(BeaconChainError::from)?; @@ -2844,16 +2913,68 @@ impl BeaconChain { Ok(block_root) } + /// If configured, wait for the fork choice run at the start of the slot to complete. 
+ fn wait_for_fork_choice_before_block_production( + self: &Arc, + slot: Slot, + ) -> Result<(), BlockProductionError> { + if let Some(rx) = &self.fork_choice_signal_rx { + let current_slot = self + .slot() + .map_err(|_| BlockProductionError::UnableToReadSlot)?; + + let timeout = Duration::from_millis(self.config.fork_choice_before_proposal_timeout_ms); + + if slot == current_slot || slot == current_slot + 1 { + match rx.wait_for_fork_choice(slot, timeout) { + ForkChoiceWaitResult::Success(fc_slot) => { + debug!( + self.log, + "Fork choice successfully updated before block production"; + "slot" => slot, + "fork_choice_slot" => fc_slot, + ); + } + ForkChoiceWaitResult::Behind(fc_slot) => { + warn!( + self.log, + "Fork choice notifier out of sync with block production"; + "fork_choice_slot" => fc_slot, + "slot" => slot, + "message" => "this block may be orphaned", + ); + } + ForkChoiceWaitResult::TimeOut => { + warn!( + self.log, + "Timed out waiting for fork choice before proposal"; + "message" => "this block may be orphaned", + ); + } + } + } else { + error!( + self.log, + "Producing block at incorrect slot"; + "block_slot" => slot, + "current_slot" => current_slot, + "message" => "check clock sync, this block may be orphaned", + ); + } + } + Ok(()) + } + /// Produce a new block at the given `slot`. /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. - pub fn produce_block( - &self, + pub fn produce_block>( + self: &Arc, randao_reveal: Signature, slot: Slot, validator_graffiti: Option, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { self.produce_block_with_verification( randao_reveal, slot, @@ -2863,16 +2984,20 @@ impl BeaconChain { } /// Same as `produce_block` but allowing for configuration of RANDAO-verification. 
- pub fn produce_block_with_verification( - &self, + pub fn produce_block_with_verification>( + self: &Arc, randao_reveal: Signature, slot: Slot, validator_graffiti: Option, verification: ProduceBlockVerification, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); + let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES); + self.wait_for_fork_choice_before_block_production(slot)?; + drop(fork_choice_timer); + let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); let head_info = self .head_info() @@ -2901,7 +3026,7 @@ impl BeaconChain { }; drop(state_load_timer); - self.produce_block_on_state( + self.produce_block_on_state::( state, state_root_opt, slot, @@ -2923,7 +3048,7 @@ impl BeaconChain { /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// performing a tree hash in some scenarios. 
- pub fn produce_block_on_state( + pub fn produce_block_on_state>( &self, mut state: BeaconState, state_root_opt: Option, @@ -2931,7 +3056,7 @@ impl BeaconChain { randao_reveal: Signature, validator_graffiti: Option, verification: ProduceBlockVerification, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { let eth1_chain = self .eth1_chain .as_ref() @@ -3056,6 +3181,7 @@ impl BeaconChain { attestations, deposits, voluntary_exits: voluntary_exits.into(), + _phantom: PhantomData, }, }), BeaconState::Altair(_) => { @@ -3075,12 +3201,14 @@ impl BeaconChain { deposits, voluntary_exits: voluntary_exits.into(), sync_aggregate, + _phantom: PhantomData, }, }) } BeaconState::Merge(_) => { let sync_aggregate = get_sync_aggregate()?; - let execution_payload = get_execution_payload(self, &state, proposer_index)?; + let execution_payload = + get_execution_payload::(self, &state, proposer_index)?; BeaconBlock::Merge(BeaconBlockMerge { slot, proposer_index, @@ -3165,7 +3293,7 @@ impl BeaconChain { /// /// See the documentation of `InvalidationOperation` for information about defining `op`. pub fn process_invalid_execution_payload( - &self, + self: &Arc, op: &InvalidationOperation, ) -> Result<(), Error> { debug!( @@ -3233,11 +3361,19 @@ impl BeaconChain { } /// Execute the fork choice algorithm and enthrone the result as the canonical head. - pub fn fork_choice(&self) -> Result<(), Error> { + pub fn fork_choice(self: &Arc) -> Result<(), Error> { + self.fork_choice_at_slot(self.slot()?) + } + + /// Execute fork choice at `slot`, processing queued attestations from `slot - 1` and earlier. + /// + /// The `slot` is not verified in any way, callers should ensure it corresponds to at most + /// one slot ahead of the current wall-clock slot. 
+ pub fn fork_choice_at_slot(self: &Arc, slot: Slot) -> Result<(), Error> { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); - let result = self.fork_choice_internal(); + let result = self.fork_choice_internal(slot); if result.is_err() { metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); @@ -3246,13 +3382,13 @@ impl BeaconChain { result } - fn fork_choice_internal(&self) -> Result<(), Error> { + fn fork_choice_internal(self: &Arc, slot: Slot) -> Result<(), Error> { // Atomically obtain the head block root and the finalized block. let (beacon_block_root, finalized_block) = { let mut fork_choice = self.fork_choice.write(); // Determine the root of the block that is the head of the chain. - let beacon_block_root = fork_choice.get_head(self.slot()?, &self.spec)?; + let beacon_block_root = fork_choice.get_head(slot, &self.spec)?; (beacon_block_root, fork_choice.get_finalized_block()?) }; @@ -3295,7 +3431,8 @@ impl BeaconChain { let new_head = { let beacon_block = self - .get_block(&beacon_block_root)? + .store + .get_full_block(&beacon_block_root)? .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; let beacon_state_root = beacon_block.state_root(); @@ -3399,9 +3536,6 @@ impl BeaconChain { .beacon_state .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); - // Used later for the execution engine. - let is_merge_transition_complete = is_merge_transition_complete(&new_head.beacon_state); - drop(lag_timer); // Clear the early attester cache in case it conflicts with `self.canonical_head`. @@ -3594,45 +3728,52 @@ impl BeaconChain { } } - // If this is a post-merge block, update the execution layer. - if is_merge_transition_complete { - let current_slot = self.slot()?; + // Update the execution layer. + // Always use the wall-clock slot to update the execution engine rather than the `slot` + // passed in. 
+ if let Err(e) = self.update_execution_engine_forkchoice_blocking(self.slot()?) { + crit!( + self.log, + "Failed to update execution head"; + "error" => ?e + ); + } - if let Err(e) = self.update_execution_engine_forkchoice_blocking(current_slot) { - crit!( - self.log, - "Failed to update execution head"; - "error" => ?e - ); - } - - // Performing this call immediately after - // `update_execution_engine_forkchoice_blocking` might result in two calls to fork - // choice updated, one *without* payload attributes and then a second *with* - // payload attributes. - // - // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as - // far as I know. - if let Err(e) = self.prepare_beacon_proposer_blocking() { - crit!( - self.log, - "Failed to prepare proposers after fork choice"; - "error" => ?e - ); - } + // Performing this call immediately after + // `update_execution_engine_forkchoice_blocking` might result in two calls to fork + // choice updated, one *without* payload attributes and then a second *with* + // payload attributes. + // + // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as + // far as I know. + if let Err(e) = self.prepare_beacon_proposer_blocking() { + crit!( + self.log, + "Failed to prepare proposers after fork choice"; + "error" => ?e + ); } Ok(()) } - pub fn prepare_beacon_proposer_blocking(&self) -> Result<(), Error> { + pub fn prepare_beacon_proposer_blocking(self: &Arc) -> Result<(), Error> { + let current_slot = self.slot()?; + + // Avoids raising an error before Bellatrix. + // + // See `Self::prepare_beacon_proposer_async` for more detail. 
+ if self.slot_is_prior_to_bellatrix(current_slot + 1) { + return Ok(()); + } + let execution_layer = self .execution_layer .as_ref() .ok_or(Error::ExecutionLayerMissing)?; execution_layer - .block_on_generic(|_| self.prepare_beacon_proposer_async()) + .block_on_generic(|_| self.prepare_beacon_proposer_async(current_slot)) .map_err(Error::PrepareProposerBlockingFailed)? } @@ -3648,7 +3789,18 @@ impl BeaconChain { /// 1. We're in the tail-end of the slot (as defined by PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) /// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're preparing for /// the next slot and the block at the current slot is already known). - pub async fn prepare_beacon_proposer_async(&self) -> Result<(), Error> { + pub async fn prepare_beacon_proposer_async( + self: &Arc, + current_slot: Slot, + ) -> Result<(), Error> { + let prepare_slot = current_slot + 1; + let prepare_epoch = prepare_slot.epoch(T::EthSpec::slots_per_epoch()); + + // There's no need to run the proposer preparation routine before the bellatrix fork. + if self.slot_is_prior_to_bellatrix(prepare_slot) { + return Ok(()); + } + let execution_layer = self .execution_layer .clone() @@ -3661,7 +3813,7 @@ impl BeaconChain { } let head = self.head_info()?; - let current_slot = self.slot()?; + let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); // Don't bother with proposer prep if the head is more than // `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot. @@ -3679,19 +3831,6 @@ impl BeaconChain { return Ok(()); } - // We only start to push preparation data for some chain *after* the transition block - // has been imported. - // - // There is no payload preparation for the transition block (i.e., the first block with - // execution enabled in some chain). 
- if head.execution_payload_block_hash.is_none() { - return Ok(()); - }; - - let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); - let prepare_slot = current_slot + 1; - let prepare_epoch = prepare_slot.epoch(T::EthSpec::slots_per_epoch()); - // Ensure that the shuffling decision root is correct relative to the epoch we wish to // query. let shuffling_decision_root = if head_epoch == prepare_epoch { @@ -3845,8 +3984,6 @@ impl BeaconChain { "prepare_slot" => prepare_slot ); - // Use the blocking method here so that we don't form a queue of these functions when - // routinely calling them. self.update_execution_engine_forkchoice_async(current_slot) .await?; } @@ -3855,9 +3992,16 @@ impl BeaconChain { } pub fn update_execution_engine_forkchoice_blocking( - &self, + self: &Arc, current_slot: Slot, ) -> Result<(), Error> { + // Avoids raising an error before Bellatrix. + // + // See `Self::update_execution_engine_forkchoice_async` for more detail. + if self.slot_is_prior_to_bellatrix(current_slot + 1) { + return Ok(()); + } + let execution_layer = self .execution_layer .as_ref() @@ -3869,9 +4013,24 @@ impl BeaconChain { } pub async fn update_execution_engine_forkchoice_async( - &self, + self: &Arc, current_slot: Slot, ) -> Result<(), Error> { + let next_slot = current_slot + 1; + + // There is no need to issue a `forkchoiceUpdated` (fcU) message unless the Bellatrix fork + // has: + // + // 1. Already happened. + // 2. Will happen in the next slot. + // + // The reason for a fcU message in the slot prior to the Bellatrix fork is in case the + // terminal difficulty has already been reached and a payload preparation message needs to + // be issued. + if self.slot_is_prior_to_bellatrix(next_slot) { + return Ok(()); + } + let execution_layer = self .execution_layer .as_ref() @@ -3898,34 +4057,71 @@ impl BeaconChain { // We are taking the `self.fork_choice` lock whilst holding the `forkchoice_lock`. 
This // is intentional, since it allows us to ensure a consistent ordering of messages to the // execution layer. - let (head_block_root, head_hash, finalized_hash) = - if let Some(params) = self.fork_choice.read().get_forkchoice_update_parameters() { - if let Some(head_hash) = params.head_hash { - ( - params.head_root, - head_hash, - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // The head block does not have an execution block hash, there is no need to - // send an update to the EL. - return Ok(()); - } + let forkchoice_update_parameters = + self.fork_choice.read().get_forkchoice_update_parameters(); + let (head_block_root, head_hash, finalized_hash) = if let Some(params) = + forkchoice_update_parameters + { + if let Some(head_hash) = params.head_hash { + ( + params.head_root, + head_hash, + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) } else { - warn!( - self.log, - "Missing forkchoice params"; - "msg" => "please report this non-critical bug" - ); - return Ok(()); - }; + // The head block does not have an execution block hash. We must check to see if we + // happen to be the proposer of the transition block, in which case we still need to + // send forkchoice_updated. + match self.spec.fork_name_at_slot::(next_slot) { + // We are pre-bellatrix; no need to update the EL. + ForkName::Base | ForkName::Altair => return Ok(()), + _ => { + // We are post-bellatrix + if execution_layer + .payload_attributes(next_slot, params.head_root) + .await + .is_some() + { + // We are a proposer, check for terminal_pow_block_hash + if let Some(terminal_pow_block_hash) = execution_layer + .get_terminal_pow_block_hash(&self.spec) + .await + .map_err(Error::ForkchoiceUpdate)? 
+ { + info!( + self.log, + "Prepared POS transition block proposer"; "slot" => next_slot + ); + ( + params.head_root, + terminal_pow_block_hash, + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) + } else { + // TTD hasn't been reached yet, no need to update the EL. + return Ok(()); + } + } else { + // We are not a proposer, no need to update the EL. + return Ok(()); + } + } + } + } + } else { + warn!( + self.log, + "Missing forkchoice params"; + "msg" => "please report this non-critical bug" + ); + return Ok(()); + }; - let forkchoice_updated_response = self - .execution_layer - .as_ref() - .ok_or(Error::ExecutionLayerMissing)? + let forkchoice_updated_response = execution_layer .notify_forkchoice_updated(head_hash, finalized_hash, current_slot, head_block_root) .await .map_err(Error::ExecutionForkChoiceUpdateFailed); @@ -3935,8 +4131,26 @@ impl BeaconChain { drop(forkchoice_lock); match forkchoice_updated_response { - Ok(status) => match &status { - PayloadStatus::Valid | PayloadStatus::Syncing => Ok(()), + Ok(status) => match status { + PayloadStatus::Valid => { + // Ensure that fork choice knows that the block is no longer optimistic. + if let Err(e) = self + .fork_choice + .write() + .on_valid_execution_payload(head_block_root) + { + error!( + self.log, + "Failed to validate payload"; + "error" => ?e + ) + }; + Ok(()) + } + // There's nothing to be done for a syncing response. If the block is already + // `SYNCING` in fork choice, there's nothing to do. If already known to be `VALID` + // or `INVALID` then we don't want to change it to syncing. + PayloadStatus::Syncing => Ok(()), // The specification doesn't list `ACCEPTED` as a valid response to a fork choice // update. This response *seems* innocent enough, so we won't return early with an // error. However, we create a log to bring attention to the issue. 
@@ -3960,13 +4174,24 @@ impl BeaconChain { ); // The execution engine has stated that all blocks between the // `head_execution_block_hash` and `latest_valid_hash` are invalid. - self.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { - head_block_root, - always_invalidate_head: true, - latest_valid_ancestor: *latest_valid_hash, - }, - )?; + let chain = self.clone(); + execution_layer + .executor() + .spawn_blocking_handle( + move || { + chain.process_invalid_execution_payload( + &InvalidationOperation::InvalidateMany { + head_block_root, + always_invalidate_head: true, + latest_valid_ancestor: latest_valid_hash, + }, + ) + }, + "process_invalid_execution_payload_many", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -3982,11 +4207,22 @@ impl BeaconChain { // // Using a `None` latest valid ancestor will result in only the head block // being invalidated (no ancestors). - self.process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: head_block_root, - }, - )?; + let chain = self.clone(); + execution_layer + .executor() + .spawn_blocking_handle( + move || { + chain.process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: head_block_root, + }, + ) + }, + "process_invalid_execution_payload_single", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -3995,6 +4231,13 @@ impl BeaconChain { } } + /// Returns `true` if the given slot is prior to the `bellatrix_fork_epoch`. 
+ fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { + self.spec.bellatrix_fork_epoch.map_or(true, |bellatrix| { + slot.epoch(T::EthSpec::slots_per_epoch()) < bellatrix + }) + } + /// Returns the status of the current head block, regarding the validity of the execution /// payload. pub fn head_safety_status(&self) -> Result { @@ -4008,7 +4251,7 @@ impl BeaconChain { let status = match head_block.execution_status { ExecutionStatus::Valid(block_hash) => HeadSafetyStatus::Safe(Some(block_hash)), ExecutionStatus::Invalid(block_hash) => HeadSafetyStatus::Invalid(block_hash), - ExecutionStatus::Unknown(block_hash) => HeadSafetyStatus::Unsafe(block_hash), + ExecutionStatus::Optimistic(block_hash) => HeadSafetyStatus::Unsafe(block_hash), ExecutionStatus::Irrelevant(_) => HeadSafetyStatus::Safe(None), }; @@ -4071,10 +4314,35 @@ impl BeaconChain { /// Called by the timer on every slot. /// - /// Performs slot-based pruning. - pub fn per_slot_task(&self) { + /// Note: this function **MUST** be called from a non-async context since + /// it contains a call to `fork_choice` which may eventually call + /// `tokio::runtime::block_on` in certain cases. + pub fn per_slot_task(self: &Arc) { trace!(self.log, "Running beacon chain per slot tasks"); if let Some(slot) = self.slot_clock.now() { + // Run fork choice and signal to any waiting task that it has completed. + if let Err(e) = self.fork_choice() { + error!( + self.log, + "Fork choice error at slot start"; + "error" => ?e, + "slot" => slot, + ); + } + + // Send the notification regardless of fork choice success, this is a "best effort" + // notification and we don't want block production to hit the timeout in case of error. 
+ if let Some(tx) = &self.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(slot) { + warn!( + self.log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => slot, + ); + } + } + self.naive_aggregation_pool.write().prune(slot); self.block_times_cache.write().prune(slot); } @@ -4293,11 +4561,14 @@ impl BeaconChain { /// /// This could be a very expensive operation and should only be done in testing/analysis /// activities. - pub fn chain_dump(&self) -> Result>, Error> { + #[allow(clippy::type_complexity)] + pub fn chain_dump( + &self, + ) -> Result>>, Error> { let mut dump = vec![]; let mut last_slot = BeaconSnapshot { - beacon_block: self.head()?.beacon_block, + beacon_block: self.head()?.beacon_block.into(), beacon_block_root: self.head()?.beacon_block_root, beacon_state: self.head()?.beacon_state, }; @@ -4311,9 +4582,12 @@ impl BeaconChain { break; // Genesis has been reached. } - let beacon_block = self.store.get_block(&beacon_block_root)?.ok_or_else(|| { - Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) - })?; + let beacon_block = self + .store + .get_blinded_block(&beacon_block_root)? 
+ .ok_or_else(|| { + Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) + })?; let beacon_state_root = beacon_block.state_root(); let beacon_state = self .store @@ -4398,7 +4672,7 @@ impl BeaconChain { visited.insert(block_hash); if signed_beacon_block.slot() % T::EthSpec::slots_per_epoch() == 0 { - let block = self.get_block(&block_hash).unwrap().unwrap(); + let block = self.get_blinded_block(&block_hash).unwrap().unwrap(); let state = self .get_state(&block.state_root(), Some(block.slot())) .unwrap() diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 1974686dc5..450e7c11c9 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -13,7 +13,8 @@ use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ - BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, Slot, + BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, Hash256, + Slot, }; #[derive(Debug)] @@ -255,9 +256,9 @@ where self.time = slot } - fn on_verified_block( + fn on_verified_block>( &mut self, - _block: &BeaconBlock, + _block: &BeaconBlock, block_root: Hash256, state: &BeaconState, ) -> Result<(), Self::Error> { @@ -301,7 +302,7 @@ where metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES); let justified_block = self .store - .get_block(&self.justified_checkpoint.root) + .get_blinded_block(&self.justified_checkpoint.root) .map_err(Error::FailedToReadBlock)? .ok_or(Error::MissingBlock(self.justified_checkpoint.root))? 
.deconstruct() diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index e291fc2a9e..3be198e5e9 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,11 +1,14 @@ use serde_derive::Serialize; -use types::{BeaconState, EthSpec, Hash256, SignedBeaconBlock}; +use types::{ + BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, SignedBeaconBlock, + SignedBlindedBeaconBlock, +}; /// Represents some block and its associated state. Generally, this will be used for tracking the /// head, justified head and finalized head. #[derive(Clone, Serialize, PartialEq, Debug)] -pub struct BeaconSnapshot { - pub beacon_block: SignedBeaconBlock, +pub struct BeaconSnapshot = FullPayload> { + pub beacon_block: SignedBeaconBlock, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } @@ -19,14 +22,14 @@ pub struct PreProcessingSnapshot { pub pre_state: BeaconState, /// This value is only set to `Some` if the `pre_state` was *not* advanced forward. pub beacon_state_root: Option, - pub beacon_block: SignedBeaconBlock, + pub beacon_block: SignedBlindedBeaconBlock, pub beacon_block_root: Hash256, } -impl BeaconSnapshot { +impl> BeaconSnapshot { /// Create a new checkpoint. pub fn new( - beacon_block: SignedBeaconBlock, + beacon_block: SignedBeaconBlock, beacon_block_root: Hash256, beacon_state: BeaconState, ) -> Self { @@ -49,7 +52,7 @@ impl BeaconSnapshot { /// Update all fields of the checkpoint. 
pub fn update( &mut self, - beacon_block: SignedBeaconBlock, + beacon_block: SignedBeaconBlock, beacon_block_root: Hash256, beacon_state: BeaconState, ) { diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index bc80847609..9a035f42a7 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -2,12 +2,12 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; use operation_pool::{AttMaxCover, MaxCover, RewardCache}; use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; -use types::{BeaconBlockRef, BeaconState, EthSpec, Hash256, RelativeEpoch}; +use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256, RelativeEpoch}; impl BeaconChain { - pub fn compute_block_reward( + pub fn compute_block_reward>( &self, - block: BeaconBlockRef<'_, T::EthSpec>, + block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, state: &BeaconState, ) -> Result { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 30d4ac9f79..48ae91c593 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -50,6 +50,7 @@ use crate::{ beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; +use derivative::Derivative; use eth2::types::EventKind; use execution_layer::PayloadStatus; use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; @@ -70,12 +71,14 @@ use state_processing::{ use std::borrow::Cow; use std::fs; use std::io::Write; +use std::sync::Arc; use store::{Error as DBError, HotColdDB, KeyValueStore, StoreOp}; use tree_hash::TreeHash; +use types::ExecPayload; use types::{ - BeaconBlockRef, 
BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ExecutionBlockHash, - Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, Epoch, EthSpec, + ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; const POS_PANDA_BANNER: &str = r#" @@ -540,7 +543,8 @@ pub fn signature_verify_chain_segment( /// A wrapper around a `SignedBeaconBlock` that indicates it has been approved for re-gossiping on /// the p2p network. -#[derive(Debug)] +#[derive(Derivative)] +#[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct GossipVerifiedBlock { pub block: SignedBeaconBlock, pub block_root: Hash256, @@ -572,7 +576,7 @@ pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> { pub block: SignedBeaconBlock, pub block_root: Hash256, pub state: BeaconState, - pub parent_block: SignedBeaconBlock, + pub parent_block: SignedBeaconBlock>, pub confirmation_db_batch: Vec>, pub payload_verification_status: PayloadVerificationStatus, } @@ -583,7 +587,7 @@ pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> { pub trait IntoFullyVerifiedBlock: Sized { fn into_fully_verified_block( self, - chain: &BeaconChain, + chain: &Arc>, ) -> Result, BlockError> { self.into_fully_verified_block_slashable(chain) .map(|fully_verified| { @@ -599,7 +603,7 @@ pub trait IntoFullyVerifiedBlock: Sized { /// Convert the block to fully-verified form while producing data to aid checking slashability. fn into_fully_verified_block_slashable( self, - chain: &BeaconChain, + chain: &Arc>, ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; @@ -840,7 +844,7 @@ impl IntoFullyVerifiedBlock for GossipVerifiedBlock { /// Completes verification of the wrapped `block`. 
fn into_fully_verified_block_slashable( self, - chain: &BeaconChain, + chain: &Arc>, ) -> Result, BlockSlashInfo>> { let fully_verified = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; @@ -963,7 +967,7 @@ impl IntoFullyVerifiedBlock for SignatureVerifiedBlock, + chain: &Arc>, ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { @@ -993,7 +997,7 @@ impl IntoFullyVerifiedBlock for SignedBeaconBlock, + chain: &Arc>, ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, None, chain) @@ -1021,23 +1025,27 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { block_root: Hash256, parent: PreProcessingSnapshot, mut consensus_context: ConsensusContext, - chain: &BeaconChain, + chain: &Arc>, ) -> Result> { - // Reject any block if its parent is not known to fork choice. - // - // A block that is not in fork choice is either: - // - // - Not yet imported: we should reject this block because we should only import a child - // after its parent has been fully imported. - // - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore it - // because it will revert finalization. Note that the finalized block is stored in fork - // choice, so we will not reject any child of the finalized block (this is relevant during - // genesis). - if !chain - .fork_choice - .read() - .contains_block(&block.parent_root()) - { + if let Some(parent) = chain.fork_choice.read().get_block(&block.parent_root()) { + // Reject any block where the parent has an invalid payload. It's impossible for a valid + // block to descend from an invalid parent. 
+ if parent.execution_status.is_invalid() { + return Err(BlockError::ParentExecutionPayloadInvalid { + parent_root: block.parent_root(), + }); + } + } else { + // Reject any block if its parent is not known to fork choice. + // + // A block that is not in fork choice is either: + // + // - Not yet imported: we should reject this block because we should only import a child + // after its parent has been fully imported. + // - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore it + // because it will revert finalization. Note that the finalized block is stored in fork + // choice, so we will not reject any child of the finalized block (this is relevant during + // genesis). return Err(BlockError::ParentUnknown(Box::new(block))); } @@ -1158,7 +1166,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // If the payload did not validate or invalidate the block, check to see if this block is // valid for optimistic import. - if payload_verification_status == PayloadVerificationStatus::NotVerified { + if payload_verification_status.is_optimistic() { let current_slot = chain .slot_clock .now() @@ -1292,9 +1300,9 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { if valid_merge_transition_block { info!(chain.log, "{}", POS_PANDA_BANNER); info!(chain.log, "Proof of Stake Activated"; "slot" => block.slot()); - info!(chain.log, ""; "Terminal POW Block Hash" => ?block.message().execution_payload()?.parent_hash.into_root()); + info!(chain.log, ""; "Terminal POW Block Hash" => ?block.message().execution_payload()?.parent_hash().into_root()); info!(chain.log, ""; "Merge Transition Block Root" => ?block.message().tree_hash_root()); - info!(chain.log, ""; "Merge Transition Execution Hash" => ?block.message().execution_payload()?.block_hash.into_root()); + info!(chain.log, ""; "Merge Transition Execution Hash" => ?block.message().execution_payload()?.block_hash().into_root()); } Ok(Self { @@ -1523,7 +1531,7 @@ fn load_parent( // 
indicate that we don't yet know the parent. let root = block.parent_root(); let parent_block = chain - .get_block(&block.parent_root()) + .get_blinded_block(&block.parent_root()) .map_err(BlockError::BeaconChainError)? .ok_or_else(|| { // Return a `MissingBeaconBlock` error instead of a `ParentUnknown` error since diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index bd6d3ec1a6..6e66796a16 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,5 +1,6 @@ use crate::beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; +use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::head_tracker::HeadTracker; use crate::migrate::{BackgroundMigrator, MigratorConfig}; @@ -26,7 +27,7 @@ use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; -use task_executor::ShutdownReason; +use task_executor::{ShutdownReason, TaskExecutor}; use types::{ BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, @@ -90,6 +91,7 @@ pub struct BeaconChainBuilder { // Pending I/O batch that is constructed during building and should be executed atomically // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called. pending_io_batch: Vec, + task_executor: Option, } impl @@ -128,6 +130,7 @@ where slasher: None, validator_monitor: None, pending_io_batch: vec![], + task_executor: None, } } @@ -181,6 +184,13 @@ where self.log = Some(log); self } + + /// Sets the task executor. 
+ pub fn task_executor(mut self, task_executor: TaskExecutor) -> Self { + self.task_executor = Some(task_executor); + self + } + /// Attempt to load an existing eth1 cache from the builder's `Store`. pub fn get_persisted_eth1_backend(&self) -> Result, String> { let store = self @@ -239,7 +249,7 @@ where .ok_or("Fork choice not found in store")?; let genesis_block = store - .get_block(&chain.genesis_block_root) + .get_blinded_block(&chain.genesis_block_root) .map_err(|e| descriptive_db_error("genesis block", &e))? .ok_or("Genesis block not found in store")?; let genesis_state = store @@ -617,7 +627,7 @@ where // Try to decode the head block according to the current fork, if that fails, try // to backtrack to before the most recent fork. let (head_block_root, head_block, head_reverted) = - match store.get_block(&initial_head_block_root) { + match store.get_full_block(&initial_head_block_root) { Ok(Some(block)) => (initial_head_block_root, block, false), Ok(None) => return Err("Head block not found in store".into()), Err(StoreError::SszDecodeError(_)) => { @@ -714,6 +724,16 @@ where ); } + // If enabled, set up the fork choice signaller. + let (fork_choice_signal_tx, fork_choice_signal_rx) = + if self.chain_config.fork_choice_before_proposal_timeout_ms != 0 { + let tx = ForkChoiceSignalTx::new(); + let rx = tx.get_receiver(); + (Some(tx), Some(rx)) + } else { + (None, None) + }; + // Store the `PersistedBeaconChain` in the database atomically with the metadata so that on // restart we can correctly detect the presence of an initialized database. 
// @@ -772,6 +792,8 @@ where genesis_block_root, genesis_state_root, fork_choice: RwLock::new(fork_choice), + fork_choice_signal_tx, + fork_choice_signal_rx, event_handler: self.event_handler, head_tracker, shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), @@ -944,6 +966,7 @@ mod test { use std::time::Duration; use store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; + use task_executor::test_utils::TestRuntime; use types::{EthSpec, MinimalEthSpec, Slot}; type TestEthSpec = MinimalEthSpec; @@ -977,10 +1000,12 @@ mod test { .expect("should create interop genesis state"); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let runtime = TestRuntime::default(); let chain = BeaconChainBuilder::new(MinimalEthSpec) .logger(log.clone()) .store(Arc::new(store)) + .task_executor(runtime.task_executor.clone()) .genesis_state(genesis_state) .expect("should build state using recent genesis") .dummy_eth1_backend() @@ -1011,10 +1036,10 @@ mod test { assert_eq!( chain .store - .get_block(&Hash256::zero()) + .get_blinded_block(&Hash256::zero()) .expect("should read db") .expect("should find genesis block"), - block, + block.clone().into(), "should store genesis block under zero hash alias" ); assert_eq!( diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 4aee06d468..36c2f41d9d 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,6 +1,8 @@ use serde_derive::{Deserialize, Serialize}; use types::Checkpoint; +pub const DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT: u64 = 250; + #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)] pub struct ChainConfig { /// Maximum number of slots to skip when importing a consensus message (e.g., block, @@ -18,6 +20,10 @@ pub struct ChainConfig { pub enable_lock_timeouts: bool, /// The max size of a message that can be sent over the network. 
pub max_network_size: usize, + /// Number of milliseconds to wait for fork choice before proposing a block. + /// + /// If set to 0 then block proposal will not wait for fork choice at all. + pub fork_choice_before_proposal_timeout_ms: u64, } impl Default for ChainConfig { @@ -28,6 +34,7 @@ impl Default for ChainConfig { reconstruct_historic_states: false, enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M + fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, } } } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 56dced94e6..f589585f8a 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -104,6 +104,10 @@ impl EarlyAttesterCache { return Ok(None); } + if request_slot < item.block.slot() { + return Ok(None); + } + let committee_count = item .committee_lengths .get_committee_count_per_slot::(spec)?; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 42777cad0b..2442852be2 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -9,6 +9,7 @@ use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use execution_layer::PayloadStatus; +use fork_choice::ExecutionStatus; use futures::channel::mpsc::TrySendError; use operation_pool::OpPoolError; use safe_arith::ArithError; @@ -25,6 +26,7 @@ use state_processing::{ }; use std::time::Duration; use task_executor::ShutdownReason; +use tokio::task::JoinError; use types::*; macro_rules! 
easy_from_to { @@ -89,7 +91,7 @@ pub enum BeaconChainError { BlockSignatureVerifierError(state_processing::block_signature_verifier::Error), BlockReplayError(BlockReplayError), DuplicateValidatorPublicKey, - ValidatorPubkeyCacheFileError(String), + ValidatorPubkeyCacheError(String), ValidatorIndexUnknown(usize), ValidatorPubkeyUnknown(PublicKeyBytes), OpPoolError(OpPoolError), @@ -137,6 +139,18 @@ pub enum BeaconChainError { }, AltairForkDisabled, ExecutionLayerMissing, + BlockVariantLacksExecutionPayload(Hash256), + ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, execution_layer::Error), + BlockHashMissingFromExecutionLayer(ExecutionBlockHash), + InconsistentPayloadReconstructed { + slot: Slot, + exec_block_hash: ExecutionBlockHash, + canonical_payload_root: Hash256, + reconstructed_payload_root: Hash256, + canonical_transactions_root: Hash256, + reconstructed_transactions_root: Hash256, + }, + AddPayloadLogicError, ExecutionForkChoiceUpdateFailed(execution_layer::Error), PrepareProposerBlockingFailed(execution_layer::Error), ExecutionForkChoiceUpdateInvalid { @@ -162,6 +176,19 @@ pub enum BeaconChainError { fork_choice: Hash256, }, InvalidSlot(Slot), + HeadBlockNotFullyVerified { + beacon_block_root: Hash256, + execution_status: ExecutionStatus, + }, + CannotAttestToFinalizedBlock { + beacon_block_root: Hash256, + }, + RuntimeShutdown, + ProcessInvalidExecutionPayload(JoinError), + ForkChoiceSignalOutOfOrder { + current: Slot, + latest: Slot, + }, } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -212,6 +239,7 @@ pub enum BlockProductionError { FailedToLoadState(store::Error), MissingFinalizedBlock(Hash256), BlockTooLarge(usize), + ForkChoiceError(BeaconChainError), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 0ee9e4b876..08e4cd41ef 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ 
b/beacon_node/beacon_chain/src/execution_payload.rs @@ -20,6 +20,7 @@ use state_processing::per_block_processing::{ compute_timestamp_at_slot, is_execution_enabled, is_merge_transition_complete, partially_verify_execution_payload, }; +use std::sync::Arc; use types::*; /// Verify that `execution_payload` contained by `block` is considered valid by an execution @@ -32,7 +33,7 @@ use types::*; /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload pub fn notify_new_payload( - chain: &BeaconChain, + chain: &Arc>, state: &BeaconState, block: BeaconBlockRef, ) -> Result> { @@ -53,14 +54,15 @@ pub fn notify_new_payload( .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let new_payload_response = execution_layer - .block_on(|execution_layer| execution_layer.notify_new_payload(execution_payload)); + let new_payload_response = execution_layer.block_on(|execution_layer| { + execution_layer.notify_new_payload(&execution_payload.execution_payload) + }); match new_payload_response { Ok(status) => match status { PayloadStatus::Valid => Ok(PayloadVerificationStatus::Verified), PayloadStatus::Syncing | PayloadStatus::Accepted => { - Ok(PayloadVerificationStatus::NotVerified) + Ok(PayloadVerificationStatus::Optimistic) } PayloadStatus::Invalid { latest_valid_hash, .. 
@@ -118,10 +120,10 @@ pub fn validate_merge_block( .into()); } - if execution_payload.parent_hash != spec.terminal_block_hash { + if execution_payload.parent_hash() != spec.terminal_block_hash { return Err(ExecutionPayloadError::InvalidTerminalBlockHash { terminal_block_hash: spec.terminal_block_hash, - payload_parent_hash: execution_payload.parent_hash, + payload_parent_hash: execution_payload.parent_hash(), } .into()); } @@ -136,14 +138,14 @@ pub fn validate_merge_block( let is_valid_terminal_pow_block = execution_layer .block_on(|execution_layer| { - execution_layer.is_valid_terminal_pow_block_hash(execution_payload.parent_hash, spec) + execution_layer.is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) }) .map_err(ExecutionPayloadError::from)?; match is_valid_terminal_pow_block { Some(true) => Ok(()), Some(false) => Err(ExecutionPayloadError::InvalidTerminalPoWBlock { - parent_hash: execution_payload.parent_hash, + parent_hash: execution_payload.parent_hash(), } .into()), None => { @@ -167,7 +169,7 @@ pub fn validate_merge_block( debug!( chain.log, "Optimistically accepting terminal block"; - "block_hash" => ?execution_payload.parent_hash, + "block_hash" => ?execution_payload.parent_hash(), "msg" => "the terminal block/parent was unavailable" ); Ok(()) @@ -192,7 +194,7 @@ pub fn validate_execution_payload_for_gossip( let is_merge_transition_complete = match parent_block.execution_status { // Optimistically declare that an "unknown" status block has completed the merge. - ExecutionStatus::Valid(_) | ExecutionStatus::Unknown(_) => true, + ExecutionStatus::Valid(_) | ExecutionStatus::Optimistic(_) => true, // It's impossible for an irrelevant block to have completed the merge. It is pre-merge // by definition. 
ExecutionStatus::Irrelevant(_) => false, @@ -215,11 +217,11 @@ pub fn validate_execution_payload_for_gossip( ))?; // The block's execution payload timestamp is correct with respect to the slot - if execution_payload.timestamp != expected_timestamp { + if execution_payload.timestamp() != expected_timestamp { return Err(BlockError::ExecutionPayloadError( ExecutionPayloadError::InvalidPayloadTimestamp { expected: expected_timestamp, - found: execution_payload.timestamp, + found: execution_payload.timestamp(), }, )); } @@ -241,20 +243,23 @@ pub fn validate_execution_payload_for_gossip( /// Equivalent to the `get_execution_payload` function in the Validator Guide: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal -pub fn get_execution_payload( +pub fn get_execution_payload>( chain: &BeaconChain, state: &BeaconState, proposer_index: u64, -) -> Result, BlockProductionError> { - Ok(prepare_execution_payload_blocking(chain, state, proposer_index)?.unwrap_or_default()) +) -> Result { + Ok( + prepare_execution_payload_blocking::(chain, state, proposer_index)? + .unwrap_or_default(), + ) } /// Wraps the async `prepare_execution_payload` function as a blocking task. -pub fn prepare_execution_payload_blocking( +pub fn prepare_execution_payload_blocking>( chain: &BeaconChain, state: &BeaconState, proposer_index: u64, -) -> Result>, BlockProductionError> { +) -> Result, BlockProductionError> { let execution_layer = chain .execution_layer .as_ref() @@ -262,7 +267,7 @@ pub fn prepare_execution_payload_blocking( execution_layer .block_on_generic(|_| async { - prepare_execution_payload(chain, state, proposer_index).await + prepare_execution_payload::(chain, state, proposer_index).await }) .map_err(BlockProductionError::BlockingFailed)? 
} @@ -281,11 +286,11 @@ pub fn prepare_execution_payload_blocking( /// Equivalent to the `prepare_execution_payload` function in the Validator Guide: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal -pub async fn prepare_execution_payload( +pub async fn prepare_execution_payload>( chain: &BeaconChain, state: &BeaconState, proposer_index: u64, -) -> Result>, BlockProductionError> { +) -> Result, BlockProductionError> { let spec = &chain.spec; let execution_layer = chain .execution_layer @@ -328,19 +333,19 @@ pub async fn prepare_execution_payload( } else { chain .store - .get_block(&finalized_root) + .get_blinded_block(&finalized_root) .map_err(BlockProductionError::FailedToReadFinalizedBlock)? .ok_or(BlockProductionError::MissingFinalizedBlock(finalized_root))? .message() .body() .execution_payload() .ok() - .map(|ep| ep.block_hash) + .map(|ep| ep.block_hash()) }; // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. let execution_payload = execution_layer - .get_payload( + .get_payload::( parent_hash, timestamp, random, diff --git a/beacon_node/beacon_chain/src/fork_choice_signal.rs b/beacon_node/beacon_chain/src/fork_choice_signal.rs new file mode 100644 index 0000000000..fd92de661d --- /dev/null +++ b/beacon_node/beacon_chain/src/fork_choice_signal.rs @@ -0,0 +1,97 @@ +//! Concurrency helpers for synchronising block proposal with fork choice. +//! +//! The transmitter provides a way for a thread runnning fork choice on a schedule to signal +//! to the receiver that fork choice has been updated for a given slot. +use crate::BeaconChainError; +use parking_lot::{Condvar, Mutex}; +use std::sync::Arc; +use std::time::Duration; +use types::Slot; + +/// Sender, for use by the per-slot task timer. +pub struct ForkChoiceSignalTx { + pair: Arc<(Mutex, Condvar)>, +} + +/// Receiver, for use by the beacon chain waiting on fork choice to complete. 
+pub struct ForkChoiceSignalRx { + pair: Arc<(Mutex, Condvar)>, +} + +pub enum ForkChoiceWaitResult { + /// Successfully reached a slot greater than or equal to the awaited slot. + Success(Slot), + /// Fork choice was updated to a lower slot, indicative of lag or processing delays. + Behind(Slot), + /// Timed out waiting for the fork choice update from the sender. + TimeOut, +} + +impl ForkChoiceSignalTx { + pub fn new() -> Self { + let pair = Arc::new((Mutex::new(Slot::new(0)), Condvar::new())); + Self { pair } + } + + pub fn get_receiver(&self) -> ForkChoiceSignalRx { + ForkChoiceSignalRx { + pair: self.pair.clone(), + } + } + + /// Signal to the receiver that fork choice has been updated to `slot`. + /// + /// Return an error if the provided `slot` is strictly less than any previously provided slot. + pub fn notify_fork_choice_complete(&self, slot: Slot) -> Result<(), BeaconChainError> { + let &(ref lock, ref condvar) = &*self.pair; + + let mut current_slot = lock.lock(); + + if slot < *current_slot { + return Err(BeaconChainError::ForkChoiceSignalOutOfOrder { + current: *current_slot, + latest: slot, + }); + } else { + *current_slot = slot; + } + + // We use `notify_all` because there may be multiple block proposals waiting simultaneously. + // Usually there'll be 0-1. + condvar.notify_all(); + + Ok(()) + } +} + +impl Default for ForkChoiceSignalTx { + fn default() -> Self { + Self::new() + } +} + +impl ForkChoiceSignalRx { + pub fn wait_for_fork_choice(&self, slot: Slot, timeout: Duration) -> ForkChoiceWaitResult { + let &(ref lock, ref condvar) = &*self.pair; + + let mut current_slot = lock.lock(); + + // Wait for `current_slot >= slot`. + // + // Do not loop and wait, if we receive an update for the wrong slot then something is + // quite out of whack and we shouldn't waste more time waiting. 
+ if *current_slot < slot { + let timeout_result = condvar.wait_for(&mut current_slot, timeout); + + if timeout_result.timed_out() { + return ForkChoiceWaitResult::TimeOut; + } + } + + if *current_slot >= slot { + ForkChoiceWaitResult::Success(*current_slot) + } else { + ForkChoiceWaitResult::Behind(*current_slot) + } + } +} diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index e837c4fa62..fd452c33f8 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -49,7 +49,7 @@ pub fn revert_to_fork_boundary, Cold: ItemStore ); let block_iter = ParentRootBlockIterator::fork_tolerant(&store, head_block_root); - process_results(block_iter, |mut iter| { + let (block_root, blinded_block) = process_results(block_iter, |mut iter| { iter.find_map(|(block_root, block)| { if block.slot() < fork_epoch.start_slot(E::slots_per_epoch()) { Some((block_root, block)) @@ -70,7 +70,13 @@ pub fn revert_to_fork_boundary, Cold: ItemStore e, CORRUPT_DB_MESSAGE ) })? - .ok_or_else(|| format!("No pre-fork blocks found. {}", CORRUPT_DB_MESSAGE)) + .ok_or_else(|| format!("No pre-fork blocks found. {}", CORRUPT_DB_MESSAGE))?; + + let block = store + .make_full_block(&block_root, blinded_block) + .map_err(|e| format!("Unable to add payload to new head block: {:?}", e))?; + + Ok((block_root, block)) } /// Reset fork choice to the finalized checkpoint of the supplied head state. @@ -98,7 +104,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It let finalized_checkpoint = head_state.finalized_checkpoint(); let finalized_block_root = finalized_checkpoint.root; let finalized_block = store - .get_block(&finalized_block_root) + .get_full_block(&finalized_block_root) .map_err(|e| format!("Error loading finalized block: {:?}", e))? .ok_or_else(|| { format!( @@ -175,7 +181,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It // retro-actively determine if they were valid or not. 
// // This scenario is so rare that it seems OK to double-verify some blocks. - let payload_verification_status = PayloadVerificationStatus::NotVerified; + let payload_verification_status = PayloadVerificationStatus::Optimistic; let (block, _) = block.deconstruct(); fork_choice diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 234e6c64e4..1891362ebb 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -9,7 +9,7 @@ use std::borrow::Cow; use std::iter; use std::time::Duration; use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore}; -use types::{Hash256, SignedBeaconBlock, Slot}; +use types::{Hash256, SignedBlindedBeaconBlock, Slot}; /// Use a longer timeout on the pubkey cache. /// @@ -58,7 +58,7 @@ impl BeaconChain { /// Return the number of blocks successfully imported. pub fn import_historical_block_batch( &self, - blocks: &[SignedBeaconBlock], + blocks: Vec>, ) -> Result { let anchor_info = self .store @@ -106,8 +106,9 @@ impl BeaconChain { .into()); } - // Store block in the hot database. - hot_batch.push(self.store.block_as_kv_store_op(&block_root, block)); + // Store block in the hot database without payload. + self.store + .blinded_block_as_kv_store_ops(&block_root, block, &mut hot_batch); // Store block roots, including at all skip slots in the freezer DB. 
for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() { diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 412066cb90..644bce65b8 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -15,6 +15,7 @@ mod errors; pub mod eth1_chain; pub mod events; mod execution_payload; +pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 4ee0904e23..efeb76e76f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -86,6 +86,10 @@ lazy_static! { ); pub static ref BLOCK_PRODUCTION_TIMES: Result = try_create_histogram("beacon_block_production_seconds", "Full runtime of block production"); + pub static ref BLOCK_PRODUCTION_FORK_CHOICE_TIMES: Result = try_create_histogram( + "beacon_block_production_fork_choice_seconds", + "Time taken to run fork choice before block production" + ); pub static ref BLOCK_PRODUCTION_STATE_LOAD_TIMES: Result = try_create_histogram( "beacon_block_production_state_load_seconds", "Time taken to load the base state for block production" diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index d13a3d08c5..84a0b1e8dd 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -396,7 +396,7 @@ impl, Cold: ItemStore> BackgroundMigrator block.state_root(), Ok(None) => { return Err(BeaconStateError::MissingBeaconBlock(head_hash.into()).into()) @@ -518,7 +518,12 @@ impl, Cold: ItemStore> BackgroundMigrator> = abandoned_blocks .into_iter() .map(Into::into) - .map(StoreOp::DeleteBlock) + .flat_map(|block_root: Hash256| { + [ + StoreOp::DeleteBlock(block_root), + StoreOp::DeleteExecutionPayload(block_root), + ] + }) .collect(); // Persist the head in case the process is killed or crashes here. 
This prevents diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index c524bd682a..bb0132f5fe 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -203,6 +203,7 @@ impl ObservedAggregates { /// Check to see if the `root` of `item` is in self. /// /// `root` must equal `a.tree_hash_root()`. + #[allow(clippy::wrong_self_convention)] pub fn is_known(&mut self, item: &T, root: Hash256) -> Result { let index = self.get_set_index(item.get_slot())?; diff --git a/beacon_node/beacon_chain/src/pre_finalization_cache.rs b/beacon_node/beacon_chain/src/pre_finalization_cache.rs index 41771b048d..112394bb18 100644 --- a/beacon_node/beacon_chain/src/pre_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/pre_finalization_cache.rs @@ -71,7 +71,7 @@ impl BeaconChain { } // 2. Check on disk. - if self.store.get_block(&block_root)?.is_some() { + if self.store.get_blinded_block(&block_root)?.is_some() { cache.block_roots.put(block_root, ()); return Ok(true); } diff --git a/beacon_node/beacon_chain/src/proposer_prep_service.rs b/beacon_node/beacon_chain/src/proposer_prep_service.rs index 59977f02c8..18abbc8c5b 100644 --- a/beacon_node/beacon_chain/src/proposer_prep_service.rs +++ b/beacon_node/beacon_chain/src/proposer_prep_service.rs @@ -50,12 +50,19 @@ async fn proposer_prep_service( let inner_chain = chain.clone(); executor.spawn( async move { - if let Err(e) = inner_chain.prepare_beacon_proposer_async().await { - error!( - inner_chain.log, - "Proposer prepare routine failed"; - "error" => ?e - ); + if let Ok(current_slot) = inner_chain.slot() { + if let Err(e) = inner_chain + .prepare_beacon_proposer_async(current_slot) + .await + { + error!( + inner_chain.log, + "Proposer prepare routine failed"; + "error" => ?e + ); + } + } else { + debug!(inner_chain.log, "No slot for proposer prepare routine"); } }, "proposer_prep_update", diff --git 
a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 4fe9b702bd..cc8b20d699 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -3,24 +3,17 @@ mod migration_schema_v10; mod migration_schema_v6; mod migration_schema_v7; mod migration_schema_v8; +mod migration_schema_v9; mod types; -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; -use crate::validator_pubkey_cache::ValidatorPubkeyCache; -use operation_pool::{PersistedOperationPool, PersistedOperationPoolBase}; use slog::{warn, Logger}; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::fs; use std::path::Path; use std::sync::Arc; -use store::config::OnDiskStoreConfig; use store::hot_cold_store::{HotColdDB, HotColdDBError}; -use store::metadata::{SchemaVersion, CONFIG_KEY, CURRENT_SCHEMA_VERSION}; -use store::{DBColumn, Error as StoreError, ItemStore, StoreItem}; - -const PUBKEY_CACHE_FILENAME: &str = "pubkey_cache.ssz"; +use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; +use store::{Error as StoreError, StoreItem}; /// Migrate the database from one schema version to another, applying all requisite mutations. pub fn migrate_schema( @@ -33,75 +26,17 @@ pub fn migrate_schema( match (from, to) { // Migrating from the current schema version to iself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), - // Migrate across multiple versions by recursively migrating one step at a time. + // Upgrade across multiple versions by recursively migrating one step at a time. 
(_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); migrate_schema::(db.clone(), datadir, from, next, log.clone())?; migrate_schema::(db, datadir, next, to, log) } - // Migration from v0.3.0 to v0.3.x, adding the temporary states column. - // Nothing actually needs to be done, but once a DB uses v2 it shouldn't go back. - (SchemaVersion(1), SchemaVersion(2)) => { - db.store_schema_version(to)?; - Ok(()) - } - // Migration for removing the pubkey cache. - (SchemaVersion(2), SchemaVersion(3)) => { - let pk_cache_path = datadir.join(PUBKEY_CACHE_FILENAME); - // Load from file, store to DB. - ValidatorPubkeyCache::::load_from_file(&pk_cache_path) - .and_then(|cache| ValidatorPubkeyCache::convert(cache, db.clone())) - .map_err(|e| StoreError::SchemaMigrationError(format!("{:?}", e)))?; + // + // Migrations from before SchemaVersion(5) are deprecated. + // - db.store_schema_version(to)?; - - // Delete cache file now that keys are stored in the DB. - fs::remove_file(&pk_cache_path).map_err(|e| { - StoreError::SchemaMigrationError(format!( - "unable to delete {}: {:?}", - pk_cache_path.display(), - e - )) - })?; - - Ok(()) - } - // Migration for adding sync committee contributions to the persisted op pool. - (SchemaVersion(3), SchemaVersion(4)) => { - // Deserialize from what exists in the database using the `PersistedOperationPoolBase` - // variant and convert it to the Altair variant. - let pool_opt = db - .get_item::>(&OP_POOL_DB_KEY)? - .map(PersistedOperationPool::Base) - .map(PersistedOperationPool::base_to_altair); - - if let Some(pool) = pool_opt { - // Store the converted pool under the same key. - db.put_item::>(&OP_POOL_DB_KEY, &pool)?; - } - - db.store_schema_version(to)?; - - Ok(()) - } - // Migration for weak subjectivity sync support and clean up of `OnDiskStoreConfig` (#1784). - (SchemaVersion(4), SchemaVersion(5)) => { - if let Some(OnDiskStoreConfigV4 { - slots_per_restore_point, - .. 
- }) = db.hot_db.get(&CONFIG_KEY)? - { - let new_config = OnDiskStoreConfig { - slots_per_restore_point, - }; - db.hot_db.put(&CONFIG_KEY, &new_config)?; - } - - db.store_schema_version(to)?; - - Ok(()) - } // Migration for adding `execution_status` field to the fork choice store. (SchemaVersion(5), SchemaVersion(6)) => { // Database operations to be done atomically @@ -182,12 +117,21 @@ pub fn migrate_schema( Ok(()) } - // Reserved for merge-related changes. - (SchemaVersion(8), SchemaVersion(9)) => Ok(()), + // Upgrade from v8 to v9 to separate the execution payloads into their own column. + (SchemaVersion(8), SchemaVersion(9)) => { + migration_schema_v9::upgrade_to_v9::(db.clone(), log)?; + db.store_schema_version(to) + } + // Downgrade from v9 to v8 to ignore the separation of execution payloads + // NOTE: only works before the Bellatrix fork epoch. + (SchemaVersion(9), SchemaVersion(8)) => { + migration_schema_v9::downgrade_from_v9::(db.clone(), log)?; + db.store_schema_version(to) + } // Upgrade for tree-states database changes. (SchemaVersion(9), SchemaVersion(10)) => migration_schema_v10::upgrade_to_v10::(db, log), // Downgrade for tree-states database changes. - (SchemaVersion(10), SchemaVersion(8)) => { + (SchemaVersion(10), SchemaVersion(9)) => { migration_schema_v10::downgrade_from_v10::(db, log) } // Anything else is an error. @@ -198,24 +142,3 @@ pub fn migrate_schema( .into()), } } - -// Store config used in v4 schema and earlier. -#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] -pub struct OnDiskStoreConfigV4 { - pub slots_per_restore_point: u64, - pub _block_cache_size: usize, -} - -impl StoreItem for OnDiskStoreConfigV4 { - fn db_column() -> DBColumn { - DBColumn::BeaconMeta - } - - fn as_store_bytes(&self) -> Result, StoreError> { - Ok(self.as_ssz_bytes()) - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) 
- } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs index 9e9d4525e8..c6df8b918e 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs @@ -70,7 +70,7 @@ pub fn upgrade_to_v10( .zip(ssz_head_tracker.slots) { let block = db - .get_block(&head_block_root)? + .get_blinded_block(&head_block_root)? .ok_or(Error::BlockNotFound(head_block_root))?; let head_state_root = block.state_root(); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs index ebf89ec22e..4cede798ea 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -31,7 +31,7 @@ pub(crate) fn update_with_reinitialized_fork_choice( .finalized_checkpoint .root; let anchor_block = db - .get_block(&anchor_block_root) + .get_full_block_prior_to_v9(&anchor_block_root) .map_err(|e| format!("{:?}", e))? .ok_or_else(|| "Missing anchor beacon block".to_string())?; let anchor_state = db diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs index 5998eaa125..ef3f7857f9 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v8.rs @@ -34,7 +34,7 @@ pub fn update_fork_choice( // before schema v8 the cache would always miss on skipped slots. for item in balances_cache.items { // Drop any blocks that aren't found, they're presumably too old and this is only a cache. - if let Some(block) = db.get_block(&item.block_root)? { + if let Some(block) = db.get_full_block_prior_to_v9(&item.block_root)? 
{ fork_choice_store.balances_cache.items.push(CacheItemV8 { block_root: item.block_root, epoch: block.slot().epoch(T::EthSpec::slots_per_epoch()), diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs new file mode 100644 index 0000000000..e2c48d5c89 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v9.rs @@ -0,0 +1,176 @@ +use crate::beacon_chain::BeaconChainTypes; +use slog::{debug, error, info, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::Duration; +use store::{DBColumn, Error, HotColdDB, KeyValueStore}; +use types::{EthSpec, Hash256, Slot}; + +const OPS_PER_BLOCK_WRITE: usize = 2; + +/// The slot clock isn't usually available before the database is initialized, so we construct a +/// temporary slot clock by reading the genesis state. It should always exist if the database is +/// initialized at a prior schema version, however we still handle the lack of genesis state +/// gracefully. +fn get_slot_clock( + db: &HotColdDB, + log: &Logger, +) -> Result, Error> { + // At schema v8 the genesis block must be a *full* block (with payload). In all likeliness it + // actually has no payload. + let spec = db.get_chain_spec(); + let genesis_block = if let Some(block) = db.get_full_block_prior_to_v9(&Hash256::zero())? { + block + } else { + error!(log, "Missing genesis block"); + return Ok(None); + }; + let genesis_state = + if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? 
{ + state + } else { + error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); + return Ok(None); + }; + Ok(Some(T::SlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_state.genesis_time()), + Duration::from_secs(spec.seconds_per_slot), + ))) +} + +pub fn upgrade_to_v9( + db: Arc>, + log: Logger, +) -> Result<(), Error> { + // This upgrade is a no-op if the Bellatrix fork epoch has not already passed. This migration + // was implemented before the activation of Bellatrix on all networks except Kiln, so the only + // users who will need to wait for the slow copying migration are Kiln users. + let slot_clock = if let Some(slot_clock) = get_slot_clock::(&db, &log)? { + slot_clock + } else { + error!( + log, + "Unable to complete migration because genesis state or genesis block is missing" + ); + return Err(Error::SlotClockUnavailableForMigration); + }; + + let current_epoch = if let Some(slot) = slot_clock.now() { + slot.epoch(T::EthSpec::slots_per_epoch()) + } else { + return Ok(()); + }; + + let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { + fork_epoch + } else { + info!( + log, + "Upgrading database schema to v9 (no-op)"; + "info" => "To downgrade before the merge run `lighthouse db migrate`" + ); + return Ok(()); + }; + + if current_epoch >= bellatrix_fork_epoch { + info!( + log, + "Upgrading database schema to v9"; + "info" => "This will take several minutes. Each block will be read from and \ + re-written to the database. You may safely exit now (Ctrl-C) and resume \ + the migration later. Downgrading is no longer possible." + ); + + for res in db.hot_db.iter_column_keys(DBColumn::BeaconBlock) { + let block_root = res?; + let block = match db.get_full_block_prior_to_v9(&block_root) { + // A pre-v9 block is present. + Ok(Some(block)) => block, + // A block is missing. + Ok(None) => return Err(Error::BlockNotFound(block_root)), + // There was an error reading a pre-v9 block. 
Try reading it as a post-v9 block. + Err(_) => { + if db.try_get_full_block(&block_root)?.is_some() { + // The block is present as a post-v9 block, assume that it was already + // correctly migrated. + continue; + } else { + // This scenario should not be encountered since a prior check has ensured + // that this block exists. + return Err(Error::V9MigrationFailure(block_root)); + } + } + }; + + if block.message().execution_payload().is_ok() { + // Overwrite block with blinded block and store execution payload separately. + debug!( + log, + "Rewriting Bellatrix block"; + "block_root" => ?block_root, + ); + + let mut kv_batch = Vec::with_capacity(OPS_PER_BLOCK_WRITE); + db.block_as_kv_store_ops(&block_root, block, &mut kv_batch)?; + db.hot_db.do_atomically(kv_batch)?; + } + } + } else { + info!( + log, + "Upgrading database schema to v9 (no-op)"; + "info" => "To downgrade before the merge run `lighthouse db migrate`" + ); + } + + Ok(()) +} + +// This downgrade is conditional and will only succeed if the Bellatrix fork epoch hasn't been +// reached. +pub fn downgrade_from_v9( + db: Arc>, + log: Logger, +) -> Result<(), Error> { + let slot_clock = if let Some(slot_clock) = get_slot_clock::(&db, &log)? 
{ + slot_clock + } else { + error!( + log, + "Unable to complete migration because genesis state or genesis block is missing" + ); + return Err(Error::SlotClockUnavailableForMigration); + }; + + let current_epoch = if let Some(slot) = slot_clock.now() { + slot.epoch(T::EthSpec::slots_per_epoch()) + } else { + return Ok(()); + }; + + let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch { + fork_epoch + } else { + info!( + log, + "Downgrading database schema from v9"; + "info" => "You need to upgrade to v9 again before the merge" + ); + return Ok(()); + }; + + if current_epoch >= bellatrix_fork_epoch { + error!( + log, + "Downgrading from schema v9 after the Bellatrix fork epoch is not supported"; + "current_epoch" => current_epoch, + "bellatrix_fork_epoch" => bellatrix_fork_epoch, + "reason" => "You need a v9 schema database to run on a merged version of Prater or \ + mainnet. On Kiln, you have to re-sync", + ); + Err(Error::ResyncRequiredForExecutionPayloadSeparation) + } else { + Ok(()) + } +} diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index cdbb7a88f4..ed1df94677 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -25,7 +25,7 @@ use std::sync::{ Arc, }; use task_executor::TaskExecutor; -use tokio::time::sleep; +use tokio::time::{sleep, sleep_until, Instant}; use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform @@ -117,8 +117,8 @@ async fn state_advance_timer( let slot_duration = slot_clock.slot_duration(); loop { - match beacon_chain.slot_clock.duration_to_next_slot() { - Some(duration) => sleep(duration + (slot_duration / 4) * 3).await, + let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { + Some(duration) 
=> duration, None => { error!(log, "Failed to read slot clock"); // If we can't read the slot clock, just wait another slot. @@ -127,7 +127,45 @@ async fn state_advance_timer( } }; - // Only start spawn the state advance task if the lock was previously free. + // Run the state advance 3/4 of the way through the slot (9s on mainnet). + let state_advance_offset = slot_duration / 4; + let state_advance_instant = if duration_to_next_slot > state_advance_offset { + Instant::now() + duration_to_next_slot - state_advance_offset + } else { + // Skip the state advance for the current slot and wait until the next one. + Instant::now() + duration_to_next_slot + slot_duration - state_advance_offset + }; + + // Run fork choice 23/24s of the way through the slot (11.5s on mainnet). + // We need to run after the state advance, so use the same condition as above. + let fork_choice_offset = slot_duration / 24; + let fork_choice_instant = if duration_to_next_slot > state_advance_offset { + Instant::now() + duration_to_next_slot - fork_choice_offset + } else { + Instant::now() + duration_to_next_slot + slot_duration - fork_choice_offset + }; + + // Wait for the state advance. + sleep_until(state_advance_instant).await; + + // Compute the current slot here at approx 3/4 through the slot. Even though this slot is + // only used by fork choice we need to calculate it here rather than after the state + // advance, in case the state advance flows over into the next slot. + let current_slot = match beacon_chain.slot() { + Ok(slot) => slot, + Err(e) => { + warn!( + log, + "Unable to determine slot in state advance timer"; + "error" => ?e + ); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + continue; + } + }; + + // Only spawn the state advance task if the lock was previously free. 
if !is_running.lock() { let log = log.clone(); let beacon_chain = beacon_chain.clone(); @@ -175,11 +213,45 @@ async fn state_advance_timer( "msg" => "system resources may be overloaded" ) } + + // Run fork choice pre-emptively for the next slot. This processes most of the attestations + // from this slot off the hot path of block verification and production. + // Wait for the fork choice instant (which may already be past). + sleep_until(fork_choice_instant).await; + + let log = log.clone(); + let beacon_chain = beacon_chain.clone(); + let next_slot = current_slot + 1; + executor.spawn_blocking( + move || { + if let Err(e) = beacon_chain.fork_choice_at_slot(next_slot) { + warn!( + log, + "Error updating fork choice for next slot"; + "error" => ?e, + "slot" => next_slot, + ); + } + + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &beacon_chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(next_slot) { + warn!( + log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => next_slot, + ); + } + } + }, + "fork_choice_advance", + ); } } fn advance_head( - beacon_chain: &BeaconChain, + beacon_chain: &Arc>, log: &Logger, ) -> Result<(), Error> { let current_slot = beacon_chain.slot()?; @@ -200,13 +272,6 @@ fn advance_head( } } - // Run fork choice so we get the latest view of the head. - // - // This is useful since it's quite likely that the last time we ran fork choice was shortly - // after receiving the latest gossip block, but not necessarily after we've received the - // majority of attestations. 
- beacon_chain.fork_choice()?; - let head_info = beacon_chain.head_info()?; let head_block_root = head_info.block_root; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 86777252e5..1fe1cec983 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -12,15 +12,12 @@ use crate::{ }; use bls::get_withdrawal_credentials; use execution_layer::{ - test_utils::{ - ExecutionBlockGenerator, ExecutionLayerRuntime, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK, - }, + test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK}, ExecutionLayer, }; use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; -use logging::test_logger; use merkle_proof::MerkleTree; use parking_lot::Mutex; use parking_lot::RwLockWriteGuard; @@ -38,19 +35,11 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; -use task_executor::ShutdownReason; +use task_executor::{test_utils::TestRuntime, ShutdownReason}; use tree_hash::TreeHash; use types::sync_selection_proof::SyncSelectionProof; pub use types::test_utils::generate_deterministic_keypairs; -use types::{ - typenum::U4294967296, Address, AggregateSignature, Attestation, AttestationData, - AttesterSlashing, BeaconBlock, BeaconState, BeaconStateHash, ChainSpec, Checkpoint, Deposit, - DepositData, Domain, Epoch, EthSpec, ForkName, Graffiti, Hash256, IndexedAttestation, Keypair, - ProposerSlashing, PublicKeyBytes, SelectionProof, SignatureBytes, SignedAggregateAndProof, - SignedBeaconBlock, SignedBeaconBlockHash, SignedContributionAndProof, SignedRoot, - SignedVoluntaryExit, Slot, SubnetId, SyncCommittee, SyncCommitteeContribution, - SyncCommitteeMessage, VariableList, VoluntaryExit, -}; +use types::{typenum::U4294967296, *}; // 4th September 2019 pub 
const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; @@ -69,7 +58,7 @@ pub type BaseHarnessType = pub type DiskHarnessType = BaseHarnessType, LevelDB>; pub type EphemeralHarnessType = BaseHarnessType, MemoryStore>; -type BoxedMutator = Box< +pub type BoxedMutator = Box< dyn FnOnce( BeaconChainBuilder>, ) -> BeaconChainBuilder>, @@ -156,8 +145,8 @@ pub struct Builder { initial_mutator: Option>, store_mutator: Option>, execution_layer: Option, - execution_layer_runtime: Option, mock_execution_layer: Option>, + runtime: TestRuntime, log: Logger, } @@ -260,6 +249,9 @@ where Cold: ItemStore, { pub fn new(eth_spec_instance: E) -> Self { + let runtime = TestRuntime::default(); + let log = runtime.log.clone(); + Self { eth_spec_instance, spec: None, @@ -271,8 +263,8 @@ where store_mutator: None, execution_layer: None, mock_execution_layer: None, - execution_layer_runtime: None, - log: test_logger(), + runtime, + log, } } @@ -335,8 +327,6 @@ where "execution layer already defined" ); - let el_runtime = ExecutionLayerRuntime::default(); - let urls: Vec = urls .iter() .map(|s| SensitiveUrl::parse(*s)) @@ -351,19 +341,19 @@ where }; let execution_layer = ExecutionLayer::from_config( config, - el_runtime.task_executor.clone(), - el_runtime.log.clone(), + self.runtime.task_executor.clone(), + self.log.clone(), ) .unwrap(); self.execution_layer = Some(execution_layer); - self.execution_layer_runtime = Some(el_runtime); self } pub fn mock_execution_layer(mut self) -> Self { let spec = self.spec.clone().expect("cannot build without spec"); let mock = MockExecutionLayer::new( + self.runtime.task_executor.clone(), spec.terminal_total_difficulty, DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, @@ -388,7 +378,7 @@ where pub fn build(self) -> BeaconChainHarness> { let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); - let log = test_logger(); + let log = self.log; let spec = self.spec.expect("cannot build without spec"); let seconds_per_slot = spec.seconds_per_slot; 
let validator_keypairs = self @@ -400,6 +390,7 @@ where .custom_spec(spec) .store(self.store.expect("cannot build without store")) .store_migrator_config(MigratorConfig::default().blocking()) + .task_executor(self.runtime.task_executor.clone()) .execution_layer(self.execution_layer) .dummy_eth1_backend() .expect("should build dummy backend") @@ -439,8 +430,8 @@ where chain: Arc::new(chain), validator_keypairs, shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), + runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, - execution_layer_runtime: self.execution_layer_runtime, rng: make_rng(), } } @@ -456,9 +447,9 @@ pub struct BeaconChainHarness { pub chain: Arc>, pub spec: ChainSpec, pub shutdown_receiver: Arc>>, + pub runtime: TestRuntime, pub mock_execution_layer: Option>, - pub execution_layer_runtime: Option, pub rng: Mutex, } @@ -533,8 +524,11 @@ where self.chain.slot().unwrap() } - pub fn get_block(&self, block_hash: SignedBeaconBlockHash) -> Option> { - self.chain.get_block(&block_hash.into()).unwrap() + pub fn get_block( + &self, + block_hash: SignedBeaconBlockHash, + ) -> Option>> { + self.chain.get_blinded_block(&block_hash.into()).unwrap() } pub fn block_exists(&self, block_hash: SignedBeaconBlockHash) -> bool { @@ -590,18 +584,7 @@ where // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); - let randao_reveal = { - let epoch = slot.epoch(E::slots_per_epoch()); - let domain = self.spec.get_domain( - epoch, - Domain::Randao, - &state.fork(), - state.genesis_validators_root(), - ); - let message = epoch.signing_root(domain); - let sk = &self.validator_keypairs[proposer_index].sk; - sk.sign(message) - }; + let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); let (block, state) = self .chain @@ -649,18 +632,7 @@ where // different blocks each time. 
let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); - let randao_reveal = { - let epoch = slot.epoch(E::slots_per_epoch()); - let domain = self.spec.get_domain( - epoch, - Domain::Randao, - &state.fork(), - state.genesis_validators_root(), - ); - let message = epoch.signing_root(domain); - let sk = &self.validator_keypairs[proposer_index].sk; - sk.sign(message) - }; + let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); let pre_state = state.clone(); @@ -686,6 +658,84 @@ where (signed_block, pre_state) } + /// Create a randao reveal for a block at `slot`. + pub fn sign_randao_reveal( + &self, + state: &BeaconState, + proposer_index: usize, + slot: Slot, + ) -> Signature { + let epoch = slot.epoch(E::slots_per_epoch()); + let domain = self.spec.get_domain( + epoch, + Domain::Randao, + &state.fork(), + state.genesis_validators_root(), + ); + let message = epoch.signing_root(domain); + let sk = &self.validator_keypairs[proposer_index].sk; + sk.sign(message) + } + + /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to + /// `beacon_block_root`. The provided `state` should match the `block.state_root` for the + /// `block` identified by `beacon_block_root`. + /// + /// The attestation doesn't _really_ have anything about it that makes it unaggregated per say, + /// however this function is only required in the context of forming an unaggregated + /// attestation. It would be an (undetectable) violation of the protocol to create a + /// `SignedAggregateAndProof` based upon the output of this function. + /// + /// This function will produce attestations to optimistic blocks, which is against the + /// specification but useful during testing. 
+ pub fn produce_unaggregated_attestation_for_block( + &self, + slot: Slot, + index: CommitteeIndex, + beacon_block_root: Hash256, + mut state: Cow>, + state_root: Hash256, + ) -> Result, BeaconChainError> { + let epoch = slot.epoch(E::slots_per_epoch()); + + if state.slot() > slot { + return Err(BeaconChainError::CannotAttestToFutureState); + } else if state.current_epoch() < epoch { + let mut_state = state.to_mut(); + complete_state_advance( + mut_state, + Some(state_root), + epoch.start_slot(E::slots_per_epoch()), + &self.spec, + )?; + mut_state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + } + + let committee_len = state.get_beacon_committee(slot, index)?.committee.len(); + + let target_slot = epoch.start_slot(E::slots_per_epoch()); + let target_root = if state.slot() <= target_slot { + beacon_block_root + } else { + *state.get_block_root(target_slot)? + }; + + Ok(Attestation { + aggregation_bits: BitList::with_capacity(committee_len)?, + data: AttestationData { + slot, + index, + beacon_block_root, + source: state.current_justified_checkpoint(), + target: Checkpoint { + epoch, + root: target_root, + }, + }, + signature: AggregateSignature::empty(), + }) + } + /// A list of attestations for each committee for the given slot. /// /// The first layer of the Vec is organised per committee. 
For example, if the return value is @@ -717,7 +767,6 @@ where return None; } let mut attestation = self - .chain .produce_unaggregated_attestation_for_block( attestation_slot, bc.index, @@ -900,6 +949,7 @@ where let aggregate = self .chain .get_aggregated_attestation(&attestation.data) + .unwrap() .unwrap_or_else(|| { committee_attestations.iter().skip(1).fold( attestation.clone(), diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index f97c3e4103..4141b7dc01 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -1,11 +1,8 @@ use crate::errors::BeaconChainError; use crate::{BeaconChainTypes, BeaconStore}; -use ssz::{Decode, DecodeError, Encode}; +use ssz::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryInto; -use std::fs::File; -use std::io::{self, Read, Write}; -use std::path::Path; use store::{DBColumn, Error as StoreError, StoreItem}; use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes}; @@ -24,15 +21,7 @@ pub struct ValidatorPubkeyCache { pubkeys: Vec, indices: HashMap, pubkey_bytes: Vec, - backing: PubkeyCacheBacking, -} - -/// Abstraction over on-disk backing. -/// -/// `File` backing is legacy, `Database` is current. -enum PubkeyCacheBacking { - File(ValidatorPubkeyCacheFile), - Database(BeaconStore), + store: BeaconStore, } impl ValidatorPubkeyCache { @@ -48,7 +37,7 @@ impl ValidatorPubkeyCache { pubkeys: vec![], indices: HashMap::new(), pubkey_bytes: vec![], - backing: PubkeyCacheBacking::Database(store), + store, }; cache.import_new_pubkeys(state)?; @@ -66,7 +55,9 @@ impl ValidatorPubkeyCache { if let Some(DatabasePubkey(pubkey)) = store.get_item(&DatabasePubkey::key_for_index(validator_index))? 
{ - pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?); + pubkeys.push((&pubkey).try_into().map_err(|e| { + BeaconChainError::ValidatorPubkeyCacheError(format!("{:?}", e)) + })?); pubkey_bytes.push(pubkey); indices.insert(pubkey, validator_index); } else { @@ -78,31 +69,10 @@ impl ValidatorPubkeyCache { pubkeys, indices, pubkey_bytes, - backing: PubkeyCacheBacking::Database(store), + store, }) } - /// DEPRECATED: used only for migration - pub fn load_from_file>(path: P) -> Result { - ValidatorPubkeyCacheFile::open(&path) - .and_then(ValidatorPubkeyCacheFile::into_cache) - .map_err(Into::into) - } - - /// Convert a cache using `File` backing to one using `Database` backing. - /// - /// This will write all of the keys from `existing_cache` to `store`. - pub fn convert(existing_cache: Self, store: BeaconStore) -> Result { - let mut result = ValidatorPubkeyCache { - pubkeys: Vec::with_capacity(existing_cache.pubkeys.len()), - indices: HashMap::with_capacity(existing_cache.indices.len()), - pubkey_bytes: Vec::with_capacity(existing_cache.indices.len()), - backing: PubkeyCacheBacking::Database(store), - }; - result.import(existing_cache.pubkeys.iter().map(PublicKeyBytes::from))?; - Ok(result) - } - /// Scan the given `state` and add any new validator public keys. /// /// Does not delete any keys from `self` if they don't appear in `state`. @@ -148,14 +118,8 @@ impl ValidatorPubkeyCache { // The motivation behind this ordering is that we do not want to have states that // reference a pubkey that is not in our cache. However, it's fine to have pubkeys // that are never referenced in a state. 
- match &mut self.backing { - PubkeyCacheBacking::File(persistence_file) => { - persistence_file.append(i, &pubkey)?; - } - PubkeyCacheBacking::Database(store) => { - store.put_item(&DatabasePubkey::key_for_index(i), &DatabasePubkey(pubkey))?; - } - } + self.store + .put_item(&DatabasePubkey::key_for_index(i), &DatabasePubkey(pubkey))?; self.pubkeys.push( (&pubkey) @@ -221,105 +185,6 @@ impl DatabasePubkey { } } -/// Allows for maintaining an on-disk copy of the `ValidatorPubkeyCache`. The file is raw SSZ bytes -/// (not ASCII encoded). -/// -/// ## Writes -/// -/// Each entry is simply appended to the file. -/// -/// ## Reads -/// -/// The whole file is parsed as an SSZ "variable list" of objects. -/// -/// This parsing method is possible because the items in the list are fixed-length SSZ objects. -struct ValidatorPubkeyCacheFile(File); - -#[derive(Debug)] -enum Error { - Io(io::Error), - Ssz(DecodeError), - PubkeyDecode(bls::Error), - /// The file read from disk does not have a contiguous list of validator public keys. The file - /// has become corrupted. - InconsistentIndex { - _expected: Option, - _found: usize, - }, -} - -impl From for BeaconChainError { - fn from(e: Error) -> BeaconChainError { - BeaconChainError::ValidatorPubkeyCacheFileError(format!("{:?}", e)) - } -} - -impl ValidatorPubkeyCacheFile { - /// Opens an existing file for reading and writing. - pub fn open>(path: P) -> Result { - File::options() - .read(true) - .write(true) - .create(false) - .append(true) - .open(path) - .map(Self) - .map_err(Error::Io) - } - - /// Append a public key to file. - /// - /// The provided `index` should each be one greater than the previous and start at 0. - /// Otherwise, the file will become corrupted and unable to be converted into a cache . - pub fn append(&mut self, index: usize, pubkey: &PublicKeyBytes) -> Result<(), Error> { - append_to_file(&mut self.0, index, pubkey) - } - - /// Creates a `ValidatorPubkeyCache` by reading and parsing the underlying file. 
- pub fn into_cache(mut self) -> Result, Error> { - let mut bytes = vec![]; - self.0.read_to_end(&mut bytes).map_err(Error::Io)?; - - let list: Vec<(usize, PublicKeyBytes)> = Vec::from_ssz_bytes(&bytes).map_err(Error::Ssz)?; - - let mut last = None; - let mut pubkeys = Vec::with_capacity(list.len()); - let mut indices = HashMap::with_capacity(list.len()); - let mut pubkey_bytes = Vec::with_capacity(list.len()); - - for (index, pubkey) in list { - let expected = last.map(|n| n + 1); - if expected.map_or(true, |expected| index == expected) { - last = Some(index); - pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?); - pubkey_bytes.push(pubkey); - indices.insert(pubkey, index); - } else { - return Err(Error::InconsistentIndex { - _expected: expected, - _found: index, - }); - } - } - - Ok(ValidatorPubkeyCache { - pubkeys, - indices, - pubkey_bytes, - backing: PubkeyCacheBacking::File(self), - }) - } -} - -fn append_to_file(file: &mut File, index: usize, pubkey: &PublicKeyBytes) -> Result<(), Error> { - let mut line = Vec::with_capacity(index.ssz_bytes_len() + pubkey.ssz_bytes_len()); - - index.ssz_append(&mut line); - pubkey.ssz_append(&mut line); - - file.write_all(&line).map_err(Error::Io) -} - #[cfg(test)] mod test { use super::*; @@ -327,10 +192,7 @@ mod test { use logging::test_logger; use std::sync::Arc; use store::HotColdDB; - use tempfile::tempdir; - use types::{ - test_utils::generate_deterministic_keypair, BeaconState, EthSpec, Keypair, MainnetEthSpec, - }; + use types::{BeaconState, EthSpec, Keypair, MainnetEthSpec}; type E = MainnetEthSpec; type T = EphemeralHarnessType; @@ -424,7 +286,7 @@ mod test { check_cache_get(&cache, &keypairs[..]); drop(cache); - // Re-init the cache from the file. + // Re-init the cache from the store. 
let mut cache = ValidatorPubkeyCache::load_from_store(store.clone()).expect("should open cache"); check_cache_get(&cache, &keypairs[..]); @@ -437,36 +299,8 @@ mod test { check_cache_get(&cache, &keypairs[..]); drop(cache); - // Re-init the cache from the file. + // Re-init the cache from the store. let cache = ValidatorPubkeyCache::load_from_store(store).expect("should open cache"); check_cache_get(&cache, &keypairs[..]); } - - #[test] - fn invalid_persisted_file() { - let dir = tempdir().expect("should create tempdir"); - let path = dir.path().join("cache.ssz"); - let pubkey = generate_deterministic_keypair(0).pk.into(); - - let mut file = File::create(&path).expect("should create file"); - append_to_file(&mut file, 0, &pubkey).expect("should write to file"); - drop(file); - - let cache = ValidatorPubkeyCache::::load_from_file(&path).expect("should open cache"); - drop(cache); - - let mut file = File::options() - .write(true) - .append(true) - .open(&path) - .expect("should open file"); - - append_to_file(&mut file, 42, &pubkey).expect("should write bad data to file"); - drop(file); - - assert!( - ValidatorPubkeyCache::::load_from_file(&path).is_err(), - "should not parse invalid file" - ); - } } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 189d3baded..b1d1f71d6c 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -55,11 +55,15 @@ fn produces_attestations() { Slot::from(num_blocks_produced) }; - let block = chain + let blinded_block = chain .block_at_slot(block_slot, WhenSlotSkipped::Prev) .expect("should get block") .expect("block should not be skipped"); - let block_root = block.message().tree_hash_root(); + let block_root = blinded_block.message().tree_hash_root(); + let block = chain + .store + .make_full_block(&block_root, blinded_block) + .unwrap(); let epoch_boundary_slot = state 
.current_epoch() @@ -144,3 +148,58 @@ fn produces_attestations() { } } } + +/// Ensures that the early attester cache wont create an attestation to a block in a later slot than +/// the one requested. +#[test] +fn early_attester_cache_old_request() { + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .keypairs(KEYPAIRS[..].to_vec()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + harness.advance_slot(); + + harness.extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let head = harness.chain.head().unwrap(); + assert_eq!(head.beacon_block.slot(), 2); + let head_proto_block = harness + .chain + .fork_choice + .read() + .get_block(&head.beacon_block_root) + .unwrap(); + + harness + .chain + .early_attester_cache + .add_head_block( + head.beacon_block_root, + head.beacon_block.clone(), + head_proto_block, + &head.beacon_state, + &harness.chain.spec, + ) + .unwrap(); + + let attest_slot = head.beacon_block.slot() - 1; + let attestation = harness + .chain + .produce_unaggregated_attestation(attest_slot, 0) + .unwrap(); + + assert_eq!(attestation.data.slot, attest_slot); + let attested_block = harness + .chain + .get_blinded_block(&attestation.data.beacon_block_root) + .unwrap() + .unwrap(); + assert_eq!(attested_block.slot(), attest_slot); +} diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 00bf9fa9aa..2fe8818a9a 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -975,7 +975,7 @@ fn attestation_that_skips_epochs() { let block_slot = harness .chain .store - .get_block(&block_root) + .get_blinded_block(&block_root) .expect("should not error getting block") .expect("should find attestation block") .message() diff --git a/beacon_node/beacon_chain/tests/block_verification.rs 
b/beacon_node/beacon_chain/tests/block_verification.rs index b03e66ac80..f91597c8f6 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -12,6 +12,7 @@ use state_processing::{ per_block_processing::{per_block_processing, BlockSignatureStrategy}, per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, }; +use std::marker::PhantomData; use std::sync::Arc; use tempfile::tempdir; use types::{test_utils::generate_deterministic_keypair, *}; @@ -45,6 +46,18 @@ fn get_chain_segment() -> Vec> { .chain_dump() .expect("should dump chain") .into_iter() + .map(|snapshot| { + let full_block = harness + .chain + .store + .make_full_block(&snapshot.beacon_block_root, snapshot.beacon_block) + .unwrap(); + BeaconSnapshot { + beacon_block_root: snapshot.beacon_block_root, + beacon_block: full_block, + beacon_state: snapshot.beacon_state, + } + }) .skip(1) .collect() } @@ -962,6 +975,7 @@ fn add_base_block_to_altair_chain() { attestations: altair_body.attestations.clone(), deposits: altair_body.deposits.clone(), voluntary_exits: altair_body.voluntary_exits.clone(), + _phantom: PhantomData, }, }, signature: Signature::empty(), @@ -1083,6 +1097,7 @@ fn add_altair_block_to_base_chain() { deposits: base_body.deposits.clone(), voluntary_exits: base_body.voluntary_exits.clone(), sync_aggregate: SyncAggregate::empty(), + _phantom: PhantomData, }, }, signature: Signature::empty(), diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index d3ef3ea5e3..d67ed35f9c 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -8,17 +8,20 @@ const VALIDATOR_COUNT: usize = 32; type E = MainnetEthSpec; -fn verify_execution_payload_chain(chain: &[ExecutionPayload]) { - let mut prev_ep: Option> = None; +fn verify_execution_payload_chain(chain: &[FullPayload]) { + let mut prev_ep: Option> = None; for ep in chain { - 
assert!(*ep != ExecutionPayload::default()); - assert!(ep.block_hash != ExecutionBlockHash::zero()); + assert!(*ep != FullPayload::default()); + assert!(ep.block_hash() != ExecutionBlockHash::zero()); // Check against previous `ExecutionPayload`. if let Some(prev_ep) = prev_ep { - assert_eq!(prev_ep.block_hash, ep.parent_hash); - assert_eq!(prev_ep.block_number + 1, ep.block_number); + assert_eq!(prev_ep.block_hash(), ep.execution_payload.parent_hash); + assert_eq!( + prev_ep.execution_payload.block_number + 1, + ep.execution_payload.block_number + ); } prev_ep = Some(ep.clone()); } @@ -83,12 +86,12 @@ fn merge_with_terminal_block_hash_override() { let execution_payload = block.message().body().execution_payload().unwrap().clone(); if i == 0 { - assert_eq!(execution_payload.block_hash, genesis_pow_block_hash); + assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); } execution_payloads.push(execution_payload); } - verify_execution_payload_chain(&execution_payloads); + verify_execution_payload_chain(execution_payloads.as_slice()); } #[test] @@ -138,7 +141,7 @@ fn base_altair_merge_with_terminal_block_after_fork() { assert_eq!(merge_head.slot(), merge_fork_slot); assert_eq!( *merge_head.message().body().execution_payload().unwrap(), - ExecutionPayload::default() + FullPayload::default() ); /* @@ -154,7 +157,7 @@ fn base_altair_merge_with_terminal_block_after_fork() { .body() .execution_payload() .unwrap(), - ExecutionPayload::default() + FullPayload::default() ); assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1); @@ -178,5 +181,5 @@ fn base_altair_merge_with_terminal_block_after_fork() { execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); } - verify_execution_payload_chain(&execution_payloads); + verify_execution_payload_chain(execution_payloads.as_slice()); } diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 
4d2dfccac2..1aa9844a35 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -6,24 +6,30 @@ use beacon_chain::{ WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ - json_structures::JsonPayloadAttributesV1, ExecutionLayer, PayloadAttributes, + json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, + ExecutionLayer, ForkChoiceState, PayloadAttributes, }; -use proto_array::ExecutionStatus; +use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; +use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; +use std::time::Duration; use task_executor::ShutdownReason; +use tree_hash::TreeHash; use types::*; const VALIDATOR_COUNT: usize = 32; type E = MainnetEthSpec; -#[derive(PartialEq, Clone)] +#[derive(PartialEq, Clone, Copy)] enum Payload { Valid, Invalid { latest_valid_hash: Option, }, Syncing, + InvalidBlockHash, + InvalidTerminalBlock, } struct InvalidPayloadRig { @@ -65,14 +71,14 @@ impl InvalidPayloadRig { fn block_hash(&self, block_root: Hash256) -> ExecutionBlockHash { self.harness .chain - .get_block(&block_root) + .get_blinded_block(&block_root) .unwrap() .unwrap() .message() .body() .execution_payload() .unwrap() - .block_hash + .block_hash() } fn execution_status(&self, block_root: Hash256) -> ExecutionStatus { @@ -93,17 +99,28 @@ impl InvalidPayloadRig { self.harness.chain.head_info().unwrap() } - fn previous_payload_attributes(&self) -> PayloadAttributes { + fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let json = mock_execution_layer .server .take_previous_request() .expect("no previous request"); let params = json.get("params").expect("no params"); + + let fork_choice_state_json = params.get(0).expect("no payload param"); + let 
fork_choice_state: JsonForkChoiceStateV1 = + serde_json::from_value(fork_choice_state_json.clone()).unwrap(); + let payload_param_json = params.get(1).expect("no payload param"); let attributes: JsonPayloadAttributesV1 = serde_json::from_value(payload_param_json.clone()).unwrap(); - attributes.into() + + (fork_choice_state.into(), attributes.into()) + } + + fn previous_payload_attributes(&self) -> PayloadAttributes { + let (_, payload_attributes) = self.previous_forkchoice_update_params(); + payload_attributes } fn move_to_terminal_block(&self) { @@ -115,6 +132,16 @@ impl InvalidPayloadRig { .unwrap(); } + fn latest_execution_block_hash(&self) -> ExecutionBlockHash { + let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); + mock_execution_layer + .server + .execution_block_generator() + .latest_execution_block() + .unwrap() + .block_hash + } + fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { (0..num_blocks) .map(|_| self.import_block(is_valid.clone())) @@ -129,8 +156,9 @@ impl InvalidPayloadRig { assert_eq!(justified_checkpoint.epoch, 2); } + /// Import a block while setting the newPayload and forkchoiceUpdated responses to `is_valid`. 
fn import_block(&mut self, is_valid: Payload) -> Hash256 { - self.import_block_parametric(is_valid, |error| { + self.import_block_parametric(is_valid, is_valid, |error| { matches!( error, BlockError::ExecutionPayloadError( @@ -147,9 +175,19 @@ impl InvalidPayloadRig { .unwrap() } + fn validate_manually(&self, block_root: Hash256) { + self.harness + .chain + .fork_choice + .write() + .on_valid_execution_payload(block_root) + .unwrap(); + } + fn import_block_parametric) -> bool>( &mut self, - is_valid: Payload, + new_payload_response: Payload, + forkchoice_response: Payload, evaluate_error: F, ) -> Hash256 { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); @@ -160,15 +198,54 @@ impl InvalidPayloadRig { let (block, post_state) = self.harness.make_block(state, slot); let block_root = block.canonical_root(); - match is_valid { - Payload::Valid | Payload::Syncing => { - if is_valid == Payload::Syncing { - // Importing a payload whilst returning `SYNCING` simulates an EE that obtains - // the block via it's own means (e.g., devp2p). 
- let should_import_payload = true; - mock_execution_layer - .server - .all_payloads_syncing(should_import_payload); + let set_new_payload = |payload: Payload| match payload { + Payload::Valid => mock_execution_layer + .server + .all_payloads_valid_on_new_payload(), + Payload::Syncing => mock_execution_layer + .server + .all_payloads_syncing_on_new_payload(true), + Payload::Invalid { latest_valid_hash } => { + let latest_valid_hash = latest_valid_hash + .unwrap_or_else(|| self.block_hash(block.message().parent_root())); + mock_execution_layer + .server + .all_payloads_invalid_on_new_payload(latest_valid_hash) + } + Payload::InvalidBlockHash => mock_execution_layer + .server + .all_payloads_invalid_block_hash_on_new_payload(), + Payload::InvalidTerminalBlock => mock_execution_layer + .server + .all_payloads_invalid_terminal_block_on_new_payload(), + }; + let set_forkchoice_updated = |payload: Payload| match payload { + Payload::Valid => mock_execution_layer + .server + .all_payloads_valid_on_forkchoice_updated(), + Payload::Syncing => mock_execution_layer + .server + .all_payloads_syncing_on_forkchoice_updated(), + Payload::Invalid { latest_valid_hash } => { + let latest_valid_hash = latest_valid_hash + .unwrap_or_else(|| self.block_hash(block.message().parent_root())); + mock_execution_layer + .server + .all_payloads_invalid_on_forkchoice_updated(latest_valid_hash) + } + Payload::InvalidBlockHash => mock_execution_layer + .server + .all_payloads_invalid_block_hash_on_forkchoice_updated(), + Payload::InvalidTerminalBlock => mock_execution_layer + .server + .all_payloads_invalid_terminal_block_on_forkchoice_updated(), + }; + + match (new_payload_response, forkchoice_response) { + (Payload::Valid | Payload::Syncing, Payload::Valid | Payload::Syncing) => { + if new_payload_response == Payload::Syncing { + set_new_payload(new_payload_response); + set_forkchoice_updated(forkchoice_response); } else { mock_execution_layer.server.full_payload_verification(); } @@ -187,52 
+264,81 @@ impl InvalidPayloadRig { let execution_status = self.execution_status(root.into()); - match is_valid { - Payload::Syncing => assert!(execution_status.is_not_verified()), - Payload::Valid => assert!(execution_status.is_valid()), - Payload::Invalid { .. } => unreachable!(), + match forkchoice_response { + Payload::Syncing => assert!(execution_status.is_optimistic()), + Payload::Valid => assert!(execution_status.is_valid_and_post_bellatrix()), + Payload::Invalid { .. } + | Payload::InvalidBlockHash + | Payload::InvalidTerminalBlock => unreachable!(), } assert_eq!( - self.harness.chain.get_block(&block_root).unwrap().unwrap(), + self.harness + .chain + .store + .get_full_block(&block_root) + .unwrap() + .unwrap(), block, "block from db must match block imported" ); } - Payload::Invalid { latest_valid_hash } => { - let latest_valid_hash = latest_valid_hash - .unwrap_or_else(|| self.block_hash(block.message().parent_root())); - - mock_execution_layer - .server - .all_payloads_invalid(latest_valid_hash); + ( + Payload::Invalid { .. } | Payload::InvalidBlockHash | Payload::InvalidTerminalBlock, + _, + ) + | ( + _, + Payload::Invalid { .. } | Payload::InvalidBlockHash | Payload::InvalidTerminalBlock, + ) => { + set_new_payload(new_payload_response); + set_forkchoice_updated(forkchoice_response); match self.harness.process_block(slot, block) { Err(error) if evaluate_error(&error) => (), Err(other) => { panic!("evaluate_error returned false with {:?}", other) } - Ok(_) => panic!("block with invalid payload was imported"), + Ok(_) => { + // An invalid payload should only be imported initially if its status when + // initially supplied to the EE is Valid or Syncing. 
+ assert!(matches!( + new_payload_response, + Payload::Valid | Payload::Syncing + )); + } }; - assert!( - self.harness - .chain - .fork_choice - .read() - .get_block(&block_root) - .is_none(), - "invalid block must not exist in fork choice" - ); - assert!( - self.harness.chain.get_block(&block_root).unwrap().is_none(), - "invalid block cannot be accessed via get_block" - ); + let block_in_forkchoice = + self.harness.chain.fork_choice.read().get_block(&block_root); + if let Payload::Invalid { .. } = new_payload_response { + // A block found to be immediately invalid should not end up in fork choice. + assert_eq!(block_in_forkchoice, None); + + assert!( + self.harness + .chain + .get_blinded_block(&block_root) + .unwrap() + .is_none(), + "invalid block cannot be accessed via get_block" + ); + } else { + // A block imported and then found invalid should have an invalid status. + assert!(block_in_forkchoice.unwrap().execution_status.is_invalid()); + } } } block_root } + + fn invalidate_manually(&self, block_root: Hash256) { + self.harness + .chain + .process_invalid_execution_payload(&InvalidationOperation::InvalidateOne { block_root }) + .unwrap(); + } } /// Simple test of the different import types. @@ -269,13 +375,55 @@ fn invalid_payload_invalidates_parent() { latest_valid_hash: Some(latest_valid_hash), }); - assert!(rig.execution_status(roots[0]).is_valid()); + assert!(rig.execution_status(roots[0]).is_valid_and_post_bellatrix()); assert!(rig.execution_status(roots[1]).is_invalid()); assert!(rig.execution_status(roots[2]).is_invalid()); assert_eq!(rig.head_info().block_root, roots[0]); } +/// Test invalidation of a payload via the fork choice updated message. +/// +/// The `invalid_payload` argument determines the type of invalid payload: `Invalid`, +/// `InvalidBlockHash`, etc, taking the `latest_valid_hash` as an argument. 
+fn immediate_forkchoice_update_invalid_test( + invalid_payload: impl FnOnce(Option) -> Payload, +) { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing); + + let valid_head_root = rig.import_block(Payload::Valid); + let latest_valid_hash = Some(rig.block_hash(valid_head_root)); + + // Import a block which returns syncing when supplied via newPayload, and then + // invalid when the forkchoice update is sent. + rig.import_block_parametric(Payload::Syncing, invalid_payload(latest_valid_hash), |_| { + false + }); + + // The head should be the latest valid block. + assert_eq!(rig.head_info().block_root, valid_head_root); +} + +#[test] +fn immediate_forkchoice_update_payload_invalid() { + immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid { + latest_valid_hash, + }) +} + +#[test] +fn immediate_forkchoice_update_payload_invalid_block_hash() { + immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash) +} + +#[test] +fn immediate_forkchoice_update_payload_invalid_terminal_block() { + immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock) +} + /// Ensure the client tries to exit when the justified checkpoint is invalidated. #[test] fn justified_checkpoint_becomes_invalid() { @@ -288,7 +436,7 @@ fn justified_checkpoint_becomes_invalid() { let parent_root_of_justified = rig .harness .chain - .get_block(&justified_checkpoint.root) + .get_blinded_block(&justified_checkpoint.root) .unwrap() .unwrap() .parent_root(); @@ -298,19 +446,17 @@ fn justified_checkpoint_becomes_invalid() { assert!(rig.harness.shutdown_reasons().is_empty()); // Import a block that will invalidate the justified checkpoint. 
- rig.import_block_parametric( - Payload::Invalid { - latest_valid_hash: Some(parent_hash_of_justified), - }, - |error| { - matches!( - error, - // The block import should fail since the beacon chain knows the justified payload - // is invalid. - BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) - ) - }, - ); + let is_valid = Payload::Invalid { + latest_valid_hash: Some(parent_hash_of_justified), + }; + rig.import_block_parametric(is_valid, is_valid, |error| { + matches!( + error, + // The block import should fail since the beacon chain knows the justified payload + // is invalid. + BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) + ) + }); // The beacon chain should have triggered a shutdown. assert_eq!( @@ -357,9 +503,9 @@ fn pre_finalized_latest_valid_hash() { let slot = Slot::new(i); let root = rig.block_root_at_slot(slot).unwrap(); if slot == 1 { - assert!(rig.execution_status(root).is_valid()); + assert!(rig.execution_status(root).is_valid_and_post_bellatrix()); } else { - assert!(rig.execution_status(root).is_not_verified()); + assert!(rig.execution_status(root).is_optimistic()); } } } @@ -406,7 +552,7 @@ fn latest_valid_hash_will_validate() { } else if slot == 0 { assert!(execution_status.is_irrelevant()) } else { - assert!(execution_status.is_valid()) + assert!(execution_status.is_valid_and_post_bellatrix()) } } } @@ -444,9 +590,9 @@ fn latest_valid_hash_is_junk() { let slot = Slot::new(i); let root = rig.block_root_at_slot(slot).unwrap(); if slot == 1 { - assert!(rig.execution_status(root).is_valid()); + assert!(rig.execution_status(root).is_valid_and_post_bellatrix()); } else { - assert!(rig.execution_status(root).is_not_verified()); + assert!(rig.execution_status(root).is_optimistic()); } } } @@ -506,7 +652,13 @@ fn invalidates_all_descendants() { assert!(rig.execution_status(fork_block_root).is_invalid()); for root in blocks { - let slot = 
rig.harness.chain.get_block(&root).unwrap().unwrap().slot(); + let slot = rig + .harness + .chain + .get_blinded_block(&root) + .unwrap() + .unwrap() + .slot(); // Fork choice doesn't have info about pre-finalization, nothing to check here. if slot < finalized_slot { @@ -516,7 +668,7 @@ fn invalidates_all_descendants() { let execution_status = rig.execution_status(root); if slot <= latest_valid_slot { // Blocks prior to the latest valid hash are valid. - assert!(execution_status.is_valid()); + assert!(execution_status.is_valid_and_post_bellatrix()); } else { // Blocks after the latest valid hash are invalid. assert!(execution_status.is_invalid()); @@ -567,10 +719,16 @@ fn switches_heads() { assert_eq!(rig.head_info().block_root, fork_block_root); // The fork block has not yet been validated. - assert!(rig.execution_status(fork_block_root).is_not_verified()); + assert!(rig.execution_status(fork_block_root).is_optimistic()); for root in blocks { - let slot = rig.harness.chain.get_block(&root).unwrap().unwrap().slot(); + let slot = rig + .harness + .chain + .get_blinded_block(&root) + .unwrap() + .unwrap() + .slot(); // Fork choice doesn't have info about pre-finalization, nothing to check here. if slot < finalized_slot { @@ -580,7 +738,7 @@ fn switches_heads() { let execution_status = rig.execution_status(root); if slot <= latest_valid_slot { // Blocks prior to the latest valid hash are valid. - assert!(execution_status.is_valid()); + assert!(execution_status.is_valid_and_post_bellatrix()); } else { // Blocks after the latest valid hash are invalid. assert!(execution_status.is_invalid()); @@ -602,9 +760,17 @@ fn invalid_during_processing() { ]; // 0 should be present in the chain. - assert!(rig.harness.chain.get_block(&roots[0]).unwrap().is_some()); + assert!(rig + .harness + .chain + .get_blinded_block(&roots[0]) + .unwrap() + .is_some()); // 1 should *not* be present in the chain. 
- assert_eq!(rig.harness.chain.get_block(&roots[1]).unwrap(), None); + assert_eq!( + rig.harness.chain.get_blinded_block(&roots[1]).unwrap(), + None + ); // 2 should be the head. let head = rig.harness.chain.head_info().unwrap(); assert_eq!(head.block_root, roots[2]); @@ -623,7 +789,7 @@ fn invalid_after_optimistic_sync() { ]; for root in &roots { - assert!(rig.harness.chain.get_block(root).unwrap().is_some()); + assert!(rig.harness.chain.get_blinded_block(root).unwrap().is_some()); } // 2 should be the head. @@ -642,6 +808,42 @@ fn invalid_after_optimistic_sync() { assert_eq!(head.block_root, roots[1]); } +#[test] +fn manually_validate_child() { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. + + let parent = rig.import_block(Payload::Syncing); + let child = rig.import_block(Payload::Syncing); + + assert!(rig.execution_status(parent).is_optimistic()); + assert!(rig.execution_status(child).is_optimistic()); + + rig.validate_manually(child); + + assert!(rig.execution_status(parent).is_valid_and_post_bellatrix()); + assert!(rig.execution_status(child).is_valid_and_post_bellatrix()); +} + +#[test] +fn manually_validate_parent() { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. 
+ + let parent = rig.import_block(Payload::Syncing); + let child = rig.import_block(Payload::Syncing); + + assert!(rig.execution_status(parent).is_optimistic()); + assert!(rig.execution_status(child).is_optimistic()); + + rig.validate_manually(parent); + + assert!(rig.execution_status(parent).is_valid_and_post_bellatrix()); + assert!(rig.execution_status(child).is_optimistic()); +} + #[test] fn payload_preparation() { let mut rig = InvalidPayloadRig::new(); @@ -693,3 +895,223 @@ fn payload_preparation() { }; assert_eq!(rig.previous_payload_attributes(), payload_attributes); } + +#[test] +fn invalid_parent() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. + + // Import a syncing block atop the transition block (we'll call this the "parent block" since we + // build another block on it later). + let parent_root = rig.import_block(Payload::Syncing); + let parent_block = rig.harness.get_block(parent_root.into()).unwrap(); + let parent_state = rig + .harness + .get_hot_state(parent_block.state_root().into()) + .unwrap(); + + // Produce another block atop the parent, but don't import yet. + let slot = parent_block.slot() + 1; + rig.harness.set_current_slot(slot); + let (block, state) = rig.harness.make_block(parent_state, slot); + let block_root = block.canonical_root(); + assert_eq!(block.parent_root(), parent_root); + + // Invalidate the parent block. + rig.invalidate_manually(parent_root); + assert!(rig.execution_status(parent_root).is_invalid()); + + // Ensure the block built atop an invalid payload is invalid for gossip. + assert!(matches!( + rig.harness.chain.verify_block_for_gossip(block.clone()), + Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) + if invalid_root == parent_root + )); + + // Ensure the block built atop an invalid payload is invalid for import. 
+ assert!(matches!( + rig.harness.chain.process_block(block.clone()), + Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) + if invalid_root == parent_root + )); + + // Ensure the block built atop an invalid payload cannot be imported to fork choice. + let (block, _block_signature) = block.deconstruct(); + assert!(matches!( + rig.harness.chain.fork_choice.write().on_block( + slot, + &block, + block_root, + Duration::from_secs(0), + &state, + PayloadVerificationStatus::Optimistic, + &rig.harness.chain.spec + ), + Err(ForkChoiceError::ProtoArrayError(message)) + if message.contains(&format!( + "{:?}", + ProtoArrayError::ParentExecutionStatusIsInvalid { + block_root, + parent_root + } + )) + )); +} + +/// Tests to ensure that we will still send a proposer preparation +#[test] +fn payload_preparation_before_transition_block() { + let rig = InvalidPayloadRig::new(); + let el = rig.execution_layer(); + + let head = rig.harness.chain.head().unwrap(); + let head_info = rig.head_info(); + assert!( + !head_info.is_merge_transition_complete, + "the head block is pre-transition" + ); + assert_eq!( + head_info.execution_payload_block_hash, + Some(ExecutionBlockHash::zero()), + "the head block is post-bellatrix" + ); + + let current_slot = rig.harness.chain.slot().unwrap(); + let next_slot = current_slot + 1; + let proposer = head + .beacon_state + .get_beacon_proposer_index(next_slot, &rig.harness.chain.spec) + .unwrap(); + let fee_recipient = Address::repeat_byte(99); + + // Provide preparation data to the EL for `proposer`. 
+ el.update_proposer_preparation_blocking( + Epoch::new(0), + &[ProposerPreparationData { + validator_index: proposer as u64, + fee_recipient, + }], + ) + .unwrap(); + + rig.move_to_terminal_block(); + + rig.harness + .chain + .prepare_beacon_proposer_blocking() + .unwrap(); + rig.harness + .chain + .update_execution_engine_forkchoice_blocking(current_slot) + .unwrap(); + + let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); + let latest_block_hash = rig.latest_execution_block_hash(); + assert_eq!(payload_attributes.suggested_fee_recipient, fee_recipient); + assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); +} + +#[test] +fn attesting_to_optimistic_head() { + let mut rig = InvalidPayloadRig::new(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid); // Import a valid transition block. + + let root = rig.import_block(Payload::Syncing); + + let head = rig.harness.chain.head().unwrap(); + let slot = head.beacon_block.slot(); + assert_eq!( + head.beacon_block_root, root, + "the head should be the latest imported block" + ); + assert!( + rig.execution_status(root).is_optimistic(), + "the head should be optimistic" + ); + + /* + * Define an attestation for use during testing. It doesn't have a valid signature, but that's + * not necessary here. + */ + + let attestation = { + let mut attestation = rig + .harness + .chain + .produce_unaggregated_attestation(Slot::new(0), 0) + .unwrap(); + + attestation.aggregation_bits.set(0, true).unwrap(); + attestation.data.slot = slot; + attestation.data.beacon_block_root = root; + + rig.harness + .chain + .naive_aggregation_pool + .write() + .insert(&attestation) + .unwrap(); + + attestation + }; + + /* + * Define some closures to produce attestations. 
+ */ + + let produce_unaggregated = || rig.harness.chain.produce_unaggregated_attestation(slot, 0); + + let get_aggregated = || { + rig.harness + .chain + .get_aggregated_attestation(&attestation.data) + }; + + let get_aggregated_by_slot_and_root = || { + rig.harness + .chain + .get_aggregated_attestation_by_slot_and_root( + attestation.data.slot, + &attestation.data.tree_hash_root(), + ) + }; + + /* + * Ensure attestation production fails with an optimistic head. + */ + + macro_rules! assert_head_block_not_fully_verified { + ($func: expr) => { + assert!(matches!( + $func, + Err(BeaconChainError::HeadBlockNotFullyVerified { + beacon_block_root, + execution_status + }) + if beacon_block_root == root && matches!(execution_status, ExecutionStatus::Optimistic(_)) + )); + } + } + + assert_head_block_not_fully_verified!(produce_unaggregated()); + assert_head_block_not_fully_verified!(get_aggregated()); + assert_head_block_not_fully_verified!(get_aggregated_by_slot_and_root()); + + /* + * Ensure attestation production succeeds once the head is verified. + * + * This is effectively a control for the previous tests. 
+ */ + + rig.validate_manually(root); + assert!( + rig.execution_status(root).is_valid_and_post_bellatrix(), + "the head should no longer be optimistic" + ); + + produce_unaggregated().unwrap(); + get_aggregated().unwrap(); + get_aggregated_by_slot_and_root().unwrap(); +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index e8b97cae63..326b77e085 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -549,7 +549,7 @@ fn delete_blocks_and_states() { ); let faulty_head_block = store - .get_block(&faulty_head.into()) + .get_blinded_block(&faulty_head.into()) .expect("no errors") .expect("faulty head block exists"); @@ -591,7 +591,7 @@ fn delete_blocks_and_states() { break; } store.delete_block(&block_root).unwrap(); - assert_eq!(store.get_block(&block_root).unwrap(), None); + assert_eq!(store.get_blinded_block(&block_root).unwrap(), None); } // Deleting frozen states should do nothing @@ -836,7 +836,12 @@ fn shuffling_compatible_short_fork() { } fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconState { - let head_block = harness.chain.get_block(&block_root).unwrap().unwrap(); + let head_block = harness + .chain + .store + .get_blinded_block(&block_root) + .unwrap() + .unwrap(); harness .chain .get_state(&head_block.state_root(), Some(head_block.slot())) @@ -1641,7 +1646,7 @@ fn check_all_blocks_exist<'a>( blocks: impl Iterator, ) { for &block_hash in blocks { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); + let block = harness.chain.get_blinded_block(&block_hash.into()).unwrap(); assert!( block.is_some(), "expected block {:?} to be in DB", @@ -1688,7 +1693,7 @@ fn check_no_blocks_exist<'a>( blocks: impl Iterator, ) { for &block_hash in blocks { - let block = harness.chain.get_block(&block_hash.into()).unwrap(); + let block = harness.chain.get_blinded_block(&block_hash.into()).unwrap(); assert!( 
block.is_none(), "did not expect block {:?} to be in the DB", @@ -1936,7 +1941,12 @@ fn weak_subjectivity_sync() { .unwrap() .unwrap(); let wss_checkpoint = harness.chain.head_info().unwrap().finalized_checkpoint; - let wss_block = harness.get_block(wss_checkpoint.root.into()).unwrap(); + let wss_block = harness + .chain + .store + .get_full_block(&wss_checkpoint.root) + .unwrap() + .unwrap(); let wss_state = full_store .get_state(&wss_block.state_root(), None) .unwrap() @@ -1959,26 +1969,28 @@ fn weak_subjectivity_sync() { let seconds_per_slot = spec.seconds_per_slot; // Initialise a new beacon chain from the finalized checkpoint - let beacon_chain = BeaconChainBuilder::new(MinimalEthSpec) - .store(store.clone()) - .custom_spec(test_spec::()) - .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) - .unwrap() - .logger(log.clone()) - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .testing_slot_clock(Duration::from_secs(seconds_per_slot)) - .expect("should configure testing slot clock") - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .monitor_validators(true, vec![], log) - .build() - .expect("should build"); + let beacon_chain = Arc::new( + BeaconChainBuilder::new(MinimalEthSpec) + .store(store.clone()) + .custom_spec(test_spec::()) + .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) + .unwrap() + .logger(log.clone()) + .store_migrator_config(MigratorConfig::default().blocking()) + .dummy_eth1_backend() + .expect("should build dummy backend") + .testing_slot_clock(Duration::from_secs(seconds_per_slot)) + .expect("should configure testing slot clock") + .shutdown_sender(shutdown_tx) + .chain_config(ChainConfig::default()) + .event_handler(Some(ServerSentEventHandler::new_with_capacity( + log.clone(), + 1, + ))) + 
.monitor_validators(true, vec![], log) + .build() + .expect("should build"), + ); // Apply blocks forward to reach head. let chain_dump = harness.chain.chain_dump().unwrap(); @@ -1988,8 +2000,14 @@ fn weak_subjectivity_sync() { for snapshot in new_blocks { let block = &snapshot.beacon_block; + let full_block = harness + .chain + .store + .make_full_block(&snapshot.beacon_block_root, block.clone()) + .unwrap(); + beacon_chain.slot_clock.set_slot(block.slot().as_u64()); - beacon_chain.process_block(block.clone()).unwrap(); + beacon_chain.process_block(full_block).unwrap(); beacon_chain.fork_choice().unwrap(); // Check that the new block's state can be loaded correctly. @@ -2031,13 +2049,13 @@ fn weak_subjectivity_sync() { .map(|s| s.beacon_block.clone()) .collect::>(); beacon_chain - .import_historical_block_batch(&historical_blocks) + .import_historical_block_batch(historical_blocks.clone()) .unwrap(); assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); // Resupplying the blocks should not fail, they can be safely ignored. 
beacon_chain - .import_historical_block_batch(&historical_blocks) + .import_historical_block_batch(historical_blocks) .unwrap(); // The forwards iterator should now match the original chain @@ -2060,7 +2078,7 @@ fn weak_subjectivity_sync() { .unwrap() .map(Result::unwrap) { - let block = store.get_block(&block_root).unwrap().unwrap(); + let block = store.get_blinded_block(&block_root).unwrap().unwrap(); assert_eq!(block.slot(), slot); } @@ -2520,7 +2538,7 @@ fn check_iterators(harness: &TestHarness) { } fn get_finalized_epoch_boundary_blocks( - dump: &[BeaconSnapshot], + dump: &[BeaconSnapshot>], ) -> HashSet { dump.iter() .cloned() @@ -2528,7 +2546,9 @@ fn get_finalized_epoch_boundary_blocks( .collect() } -fn get_blocks(dump: &[BeaconSnapshot]) -> HashSet { +fn get_blocks( + dump: &[BeaconSnapshot>], +) -> HashSet { dump.iter() .cloned() .map(|checkpoint| checkpoint.beacon_block_root.into()) diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 2a0aa35b1b..7b17937a21 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -744,7 +744,11 @@ fn block_roots_skip_slot_behaviour() { "WhenSlotSkipped::Prev should accurately return the prior skipped block" ); - let expected_block = harness.chain.get_block(&skipped_root).unwrap().unwrap(); + let expected_block = harness + .chain + .get_blinded_block(&skipped_root) + .unwrap() + .unwrap(); assert_eq!( harness @@ -782,7 +786,11 @@ fn block_roots_skip_slot_behaviour() { "WhenSlotSkipped::None and WhenSlotSkipped::Prev should be equal on non-skipped slot" ); - let expected_block = harness.chain.get_block(&skips_prev).unwrap().unwrap(); + let expected_block = harness + .chain + .get_blinded_block(&skips_prev) + .unwrap() + .unwrap(); assert_eq!( harness diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index a34d02ae12..3079d7744e 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ 
-13,7 +13,7 @@ store = { path = "../store" } network = { path = "../network" } timer = { path = "../timer" } lighthouse_network = { path = "../lighthouse_network" } -parking_lot = "0.11.0" +parking_lot = "0.12.0" types = { path = "../../consensus/types" } eth2_config = { path = "../../common/eth2_config" } slot_clock = { path = "../../common/slot_clock" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 353b174a02..59f1bebdb4 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -166,6 +166,7 @@ where let builder = BeaconChainBuilder::new(eth_spec_instance) .logger(context.log().clone()) .store(store) + .task_executor(context.executor.clone()) .custom_spec(spec.clone()) .chain_config(chain_config) .graffiti(graffiti) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index bb9e196f7e..13614af12e 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -149,10 +149,8 @@ impl Config { pub fn get_existing_legacy_data_dir(&self) -> Option { dirs::home_dir() .map(|home_dir| home_dir.join(&self.data_dir)) - // Return `None` if the directory does not exists. - .filter(|dir| dir.exists()) - // Return `None` if the legacy directory is identical to the modern. - .filter(|dir| *dir != self.get_modern_data_dir()) + // Return `None` if the legacy directory does not exist or if it is identical to the modern. + .filter(|dir| dir.exists() && *dir != self.get_modern_data_dir()) } /// Returns the core path for the client. 
diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 9600ef489b..ecf3c19e30 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dev-dependencies] eth1_test_rig = { path = "../../testing/eth1_test_rig" } toml = "0.5.6" -web3 = { version = "0.17.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } +web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } sloggers = { version = "2.1.1", features = ["json"] } environment = { path = "../../lighthouse/environment" } @@ -22,7 +22,7 @@ merkle_proof = { path = "../../consensus/merkle_proof"} eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" tree_hash = "0.4.1" -parking_lot = "0.11.0" +parking_lot = "0.12.0" slog = "2.5.2" tokio = { version = "1.14.0", features = ["full"] } state_processing = { path = "../../consensus/state_processing" } diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index 876613a2ae..71b1b5b4b2 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -358,7 +358,7 @@ pub async fn get_deposit_logs_in_range( }]); let response_body = send_rpc_request(endpoint, "eth_getLogs", params, timeout).await?; - Ok(response_result_or_error(&response_body) + response_result_or_error(&response_body) .map_err(|e| format!("eth_getLogs failed: {}", e))? .as_array() .cloned() @@ -383,7 +383,7 @@ pub async fn get_deposit_logs_in_range( }) }) .collect::, String>>() - .map_err(|e| format!("Failed to get logs in range: {}", e))?) + .map_err(|e| format!("Failed to get logs in range: {}", e)) } /// Sends an RPC request to `endpoint`, using a POST with the given `body`. 
diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index b12d30ea2c..0351b5e433 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -17,7 +17,7 @@ eth2_serde_utils = "0.1.1" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } eth1 = { path = "../eth1" } -warp = { git = "https://github.com/macladson/warp", rev ="dfa259e", features = ["tls"] } +warp = { version = "0.3.2", features = ["tls"] } jsonwebtoken = "8" environment = { path = "../../lighthouse/environment" } bytes = "1.1.0" @@ -28,10 +28,11 @@ lru = "0.7.1" exit-future = "0.2.0" tree_hash = "0.4.1" tree_hash_derive = { path = "../../consensus/tree_hash_derive"} -parking_lot = "0.11.0" +parking_lot = "0.12.0" slot_clock = { path = "../../common/slot_clock" } tempfile = "3.1.0" -rand = "0.7.3" +rand = "0.8.5" zeroize = { version = "1.4.2", features = ["zeroize_derive"] } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lazy_static = "1.4.0" \ No newline at end of file +lazy_static = "1.4.0" +ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 51c689ac62..64bc948c00 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,18 +1,23 @@ +use crate::engines::ForkChoiceState; use async_trait::async_trait; use eth1::http::RpcError; +pub use ethers_core::types::Transaction; +pub use json_structures::TransitionConfigurationV1; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; - -pub const LATEST_TAG: &str = "latest"; - -use crate::engines::ForkChoiceState; -pub use json_structures::TransitionConfigurationV1; -pub use types::{Address, EthSpec, ExecutionBlockHash, ExecutionPayload, Hash256, Uint256}; +use slog::Logger; +use ssz_types::FixedVector; 
+pub use types::{ + Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, Hash256, + Uint256, VariableList, +}; pub mod auth; pub mod http; pub mod json_structures; +pub const LATEST_TAG: &str = "latest"; + pub type PayloadId = [u8; 8]; #[derive(Debug)] @@ -24,7 +29,10 @@ pub enum Error { InvalidExecutePayloadResponse(&'static str), JsonRpc(RpcError), Json(serde_json::Error), - ServerMessage { code: i64, message: String }, + ServerMessage { + code: i64, + message: String, + }, Eip155Failure, IsSyncing, ExecutionBlockNotFound(ExecutionBlockHash), @@ -32,6 +40,16 @@ pub enum Error { ParentHashEqualsBlockHash(ExecutionBlockHash), PayloadIdUnavailable, TransitionConfigurationMismatch, + PayloadConversionLogicFlaw, + InvalidBuilderQuery, + MissingPayloadId { + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + }, + DeserializeTransaction(ssz_types::Error), + DeserializeTransactions(ssz_types::Error), } impl From for Error { @@ -59,41 +77,17 @@ impl From for Error { } } -/// A generic interface for an execution engine API. 
+pub struct EngineApi; +pub struct BuilderApi; + #[async_trait] -pub trait EngineApi { - async fn upcheck(&self) -> Result<(), Error>; - - async fn get_block_by_number<'a>( - &self, - block_by_number: BlockByNumberQuery<'a>, - ) -> Result, Error>; - - async fn get_block_by_hash<'a>( - &self, - block_hash: ExecutionBlockHash, - ) -> Result, Error>; - - async fn new_payload_v1( - &self, - execution_payload: ExecutionPayload, - ) -> Result; - - async fn get_payload_v1( - &self, - payload_id: PayloadId, - ) -> Result, Error>; - - async fn forkchoice_updated_v1( +pub trait Builder { + async fn notify_forkchoice_updated( &self, forkchoice_state: ForkChoiceState, payload_attributes: Option, + log: &Logger, ) -> Result; - - async fn exchange_transition_configuration_v1( - &self, - transition_configuration: TransitionConfigurationV1, - ) -> Result; } #[derive(Clone, Copy, Debug, PartialEq)] @@ -119,6 +113,9 @@ pub enum BlockByNumberQuery<'a> { Tag(&'a str), } +/// Representation of an exection block with enough detail to determine the terminal PoW block. +/// +/// See `get_pow_block_hash_at_total_difficulty`. #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ExecutionBlock { @@ -130,6 +127,35 @@ pub struct ExecutionBlock { pub total_difficulty: Uint256, } +/// Representation of an exection block with enough detail to reconstruct a payload. 
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExecutionBlockWithTransactions { + pub parent_hash: ExecutionBlockHash, + #[serde(alias = "miner")] + pub fee_recipient: Address, + pub state_root: Hash256, + pub receipts_root: Hash256, + #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] + pub logs_bloom: FixedVector, + #[serde(alias = "mixHash")] + pub prev_randao: Hash256, + #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + pub block_number: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub gas_limit: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub gas_used: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub timestamp: u64, + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub extra_data: VariableList, + pub base_fee_per_gas: Uint256, + #[serde(rename = "hash")] + pub block_hash: ExecutionBlockHash, + pub transactions: Vec, +} + #[derive(Clone, Copy, Debug, PartialEq)] pub struct PayloadAttributes { pub timestamp: u64, @@ -142,3 +168,17 @@ pub struct ForkchoiceUpdatedResponse { pub payload_status: PayloadStatusV1, pub payload_id: Option, } + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum ProposeBlindedBlockResponseStatus { + Valid, + Invalid, + Syncing, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct ProposeBlindedBlockResponse { + pub status: ProposeBlindedBlockResponseStatus, + pub latest_valid_hash: Option, + pub validation_error: Option, +} diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 9880304d69..179045ccf8 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -3,14 +3,14 @@ use super::*; use crate::auth::Auth; use crate::json_structures::*; -use async_trait::async_trait; use eth1::http::EIP155_ERROR_STR; use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use 
serde::de::DeserializeOwned; use serde_json::json; +use std::marker::PhantomData; use std::time::Duration; -use types::EthSpec; +use types::{BlindedPayload, EthSpec, ExecutionPayloadHeader, SignedBeaconBlock}; pub use reqwest::Client; @@ -29,31 +29,39 @@ pub const ETH_SYNCING: &str = "eth_syncing"; pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_millis(250); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; -pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); +pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(6); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; -pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_millis(500); +pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(6); pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = "engine_exchangeTransitionConfigurationV1"; pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_millis(500); -pub struct HttpJsonRpc { +pub const BUILDER_GET_PAYLOAD_HEADER_V1: &str = "builder_getPayloadHeaderV1"; +pub const BUILDER_GET_PAYLOAD_HEADER_TIMEOUT: Duration = Duration::from_secs(2); + +pub const BUILDER_PROPOSE_BLINDED_BLOCK_V1: &str = "builder_proposeBlindedBlockV1"; +pub const BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT: Duration = Duration::from_secs(2); + +pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, auth: Option, + _phantom: PhantomData, } -impl HttpJsonRpc { +impl HttpJsonRpc { pub fn new(url: SensitiveUrl) -> Result { Ok(Self { client: Client::builder().build()?, url, auth: None, + _phantom: PhantomData, }) } @@ -62,20 +70,21 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, auth: Some(auth), + _phantom: PhantomData, }) } - pub async fn rpc_request( + pub async fn rpc_request( 
&self, method: &str, params: serde_json::Value, timeout: Duration, - ) -> Result { + ) -> Result { let body = JsonRequestBody { jsonrpc: JSONRPC_VERSION, method, params, - id: STATIC_ID, + id: json!(STATIC_ID), }; let mut request = self @@ -108,9 +117,8 @@ impl HttpJsonRpc { } } -#[async_trait] -impl EngineApi for HttpJsonRpc { - async fn upcheck(&self) -> Result<(), Error> { +impl HttpJsonRpc { + pub async fn upcheck(&self) -> Result<(), Error> { let result: serde_json::Value = self .rpc_request(ETH_SYNCING, json!([]), ETH_SYNCING_TIMEOUT) .await?; @@ -127,7 +135,7 @@ impl EngineApi for HttpJsonRpc { } } - async fn get_block_by_number<'a>( + pub async fn get_block_by_number<'a>( &self, query: BlockByNumberQuery<'a>, ) -> Result, Error> { @@ -141,7 +149,7 @@ impl EngineApi for HttpJsonRpc { .await } - async fn get_block_by_hash<'a>( + pub async fn get_block_by_hash( &self, block_hash: ExecutionBlockHash, ) -> Result, Error> { @@ -151,7 +159,16 @@ impl EngineApi for HttpJsonRpc { .await } - async fn new_payload_v1( + pub async fn get_block_by_hash_with_txns( + &self, + block_hash: ExecutionBlockHash, + ) -> Result>, Error> { + let params = json!([block_hash, true]); + self.rpc_request(ETH_GET_BLOCK_BY_HASH, params, ETH_GET_BLOCK_BY_HASH_TIMEOUT) + .await + } + + pub async fn new_payload_v1( &self, execution_payload: ExecutionPayload, ) -> Result { @@ -164,7 +181,7 @@ impl EngineApi for HttpJsonRpc { Ok(response.into()) } - async fn get_payload_v1( + pub async fn get_payload_v1( &self, payload_id: PayloadId, ) -> Result, Error> { @@ -177,7 +194,7 @@ impl EngineApi for HttpJsonRpc { Ok(response.into()) } - async fn forkchoice_updated_v1( + pub async fn forkchoice_updated_v1( &self, forkchoice_state: ForkChoiceState, payload_attributes: Option, @@ -198,7 +215,7 @@ impl EngineApi for HttpJsonRpc { Ok(response.into()) } - async fn exchange_transition_configuration_v1( + pub async fn exchange_transition_configuration_v1( &self, transition_configuration: 
TransitionConfigurationV1, ) -> Result { @@ -216,6 +233,62 @@ impl EngineApi for HttpJsonRpc { } } +impl HttpJsonRpc { + pub async fn get_payload_header_v1( + &self, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + let response: JsonExecutionPayloadHeaderV1 = self + .rpc_request( + BUILDER_GET_PAYLOAD_HEADER_V1, + params, + BUILDER_GET_PAYLOAD_HEADER_TIMEOUT, + ) + .await?; + + Ok(response.into()) + } + + pub async fn forkchoice_updated_v1( + &self, + forkchoice_state: ForkChoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkChoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributesV1::from) + ]); + + let response: JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V1, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT, + ) + .await?; + + Ok(response.into()) + } + + pub async fn propose_blinded_block_v1( + &self, + block: SignedBeaconBlock>, + ) -> Result, Error> { + let params = json!([block]); + + let response: JsonExecutionPayloadV1 = self + .rpc_request( + BUILDER_PROPOSE_BLINDED_BLOCK_V1, + params, + BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT, + ) + .await?; + + Ok(response.into()) + } +} #[cfg(test)] mod test { use super::auth::JwtKey; @@ -224,7 +297,7 @@ mod test { use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{MainnetEthSpec, Transaction, Unsigned, VariableList}; + use types::{MainnetEthSpec, Transactions, Unsigned, VariableList}; struct Tester { server: MockServer, @@ -326,10 +399,7 @@ mod test { const LOGS_BLOOM_01: &str = 
"0x01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101"; fn encode_transactions( - transactions: VariableList< - Transaction, - E::MaxTransactionsPerPayload, - >, + transactions: Transactions, ) -> Result { let ep: JsonExecutionPayloadV1 = JsonExecutionPayloadV1 { transactions, @@ -341,10 +411,7 @@ mod test { fn decode_transactions( transactions: serde_json::Value, - ) -> Result< - VariableList, E::MaxTransactionsPerPayload>, - serde_json::Error, - > { + ) -> Result, serde_json::Error> { let mut json = json!({ "parentHash": HASH_00, "feeRecipient": ADDRESS_01, @@ -370,7 +437,7 @@ mod test { fn assert_transactions_serde( name: &str, - as_obj: VariableList, E::MaxTransactionsPerPayload>, + as_obj: Transactions, as_json: serde_json::Value, ) { assert_eq!( @@ -388,9 +455,7 @@ mod test { } /// Example: if `spec == &[1, 1]`, then two one-byte transactions will be created. 
- fn generate_transactions( - spec: &[usize], - ) -> VariableList, E::MaxTransactionsPerPayload> { + fn generate_transactions(spec: &[usize]) -> Transactions { let mut txs = VariableList::default(); for &num_bytes in spec { @@ -860,7 +925,7 @@ mod test { extra_data: vec![].into(), base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), - transactions: vec![].into(), + transactions: vec![].into(), }; assert_eq!(payload, expected); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 600e359bb1..3ebe82602f 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,7 +1,9 @@ use super::*; use serde::{Deserialize, Serialize}; use ssz_types::FixedVector; -use types::{EthSpec, ExecutionBlockHash, Transaction, Unsigned, VariableList}; +use types::{ + EthSpec, ExecutionBlockHash, ExecutionPayloadHeader, Transaction, Unsigned, VariableList, +}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -9,7 +11,7 @@ pub struct JsonRequestBody<'a> { pub jsonrpc: &'a str, pub method: &'a str, pub params: serde_json::Value, - pub id: u32, + pub id: serde_json::Value, } #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -26,7 +28,7 @@ pub struct JsonResponseBody { pub error: Option, #[serde(default)] pub result: serde_json::Value, - pub id: u32, + pub id: serde_json::Value, } #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -56,6 +58,70 @@ pub struct JsonPayloadIdResponse { pub payload_id: PayloadId, } +#[derive(Debug, PartialEq, Default, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec", rename_all = "camelCase")] +pub struct JsonExecutionPayloadHeaderV1 { + pub parent_hash: ExecutionBlockHash, + pub fee_recipient: Address, + pub state_root: Hash256, + 
pub receipts_root: Hash256, + #[serde(with = "serde_logs_bloom")] + pub logs_bloom: FixedVector, + pub prev_randao: Hash256, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub block_number: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub gas_limit: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub gas_used: u64, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub timestamp: u64, + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub extra_data: VariableList, + pub base_fee_per_gas: Uint256, + pub block_hash: ExecutionBlockHash, + pub transactions_root: Hash256, +} + +impl From> for ExecutionPayloadHeader { + fn from(e: JsonExecutionPayloadHeaderV1) -> Self { + // Use this verbose deconstruction pattern to ensure no field is left unused. + let JsonExecutionPayloadHeaderV1 { + parent_hash, + fee_recipient, + state_root, + receipts_root, + logs_bloom, + prev_randao, + block_number, + gas_limit, + gas_used, + timestamp, + extra_data, + base_fee_per_gas, + block_hash, + transactions_root, + } = e; + + Self { + parent_hash, + fee_recipient, + state_root, + receipts_root, + logs_bloom, + prev_randao, + block_number, + gas_limit, + gas_used, + timestamp, + extra_data, + base_fee_per_gas, + block_hash, + transactions_root, + } + } +} + #[derive(Debug, PartialEq, Default, Serialize, Deserialize)] #[serde(bound = "T: EthSpec", rename_all = "camelCase")] pub struct JsonExecutionPayloadV1 { @@ -78,7 +144,7 @@ pub struct JsonExecutionPayloadV1 { pub extra_data: VariableList, pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, - #[serde(with = "serde_transactions")] + #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: VariableList, T::MaxTransactionsPerPayload>, } @@ -364,6 +430,59 @@ impl From for JsonForkchoiceUpdatedV1Response { } } +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum JsonProposeBlindedBlockResponseStatus { + Valid, + 
Invalid, + Syncing, +} +#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(bound = "E: EthSpec")] +pub struct JsonProposeBlindedBlockResponse { + pub result: ExecutionPayload, + pub error: Option, +} + +impl From> for ExecutionPayload { + fn from(j: JsonProposeBlindedBlockResponse) -> Self { + let JsonProposeBlindedBlockResponse { result, error: _ } = j; + result + } +} + +impl From for ProposeBlindedBlockResponseStatus { + fn from(j: JsonProposeBlindedBlockResponseStatus) -> Self { + match j { + JsonProposeBlindedBlockResponseStatus::Valid => { + ProposeBlindedBlockResponseStatus::Valid + } + JsonProposeBlindedBlockResponseStatus::Invalid => { + ProposeBlindedBlockResponseStatus::Invalid + } + JsonProposeBlindedBlockResponseStatus::Syncing => { + ProposeBlindedBlockResponseStatus::Syncing + } + } + } +} +impl From for JsonProposeBlindedBlockResponseStatus { + fn from(f: ProposeBlindedBlockResponseStatus) -> Self { + match f { + ProposeBlindedBlockResponseStatus::Valid => { + JsonProposeBlindedBlockResponseStatus::Valid + } + ProposeBlindedBlockResponseStatus::Invalid => { + JsonProposeBlindedBlockResponseStatus::Invalid + } + ProposeBlindedBlockResponseStatus::Syncing => { + JsonProposeBlindedBlockResponseStatus::Syncing + } + } + } +} + #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { @@ -401,75 +520,3 @@ pub mod serde_logs_bloom { .map_err(|e| serde::de::Error::custom(format!("invalid logs bloom: {:?}", e))) } } - -/// Serializes the `transactions` field of an `ExecutionPayload`. 
-pub mod serde_transactions { - use super::*; - use eth2_serde_utils::hex; - use serde::ser::SerializeSeq; - use serde::{de, Deserializer, Serializer}; - use std::marker::PhantomData; - - type Value = VariableList, N>; - - #[derive(Default)] - pub struct ListOfBytesListVisitor { - _phantom_m: PhantomData, - _phantom_n: PhantomData, - } - - impl<'a, M: Unsigned, N: Unsigned> serde::de::Visitor<'a> for ListOfBytesListVisitor { - type Value = Value; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of 0x-prefixed byte lists") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut outer = VariableList::default(); - - while let Some(val) = seq.next_element::()? { - let inner_vec = hex::decode(&val).map_err(de::Error::custom)?; - let transaction = VariableList::new(inner_vec).map_err(|e| { - serde::de::Error::custom(format!("transaction too large: {:?}", e)) - })?; - outer.push(transaction).map_err(|e| { - serde::de::Error::custom(format!("too many transactions: {:?}", e)) - })?; - } - - Ok(outer) - } - } - - pub fn serialize( - value: &Value, - serializer: S, - ) -> Result - where - S: Serializer, - { - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for transaction in value { - // It's important to match on the inner values of the transaction. Serializing the - // entire `Transaction` will result in appending the SSZ union prefix byte. The - // execution node does not want that. 
- let hex = hex::encode(&transaction[..]); - seq.serialize_element(&hex)?; - } - seq.end() - } - - pub fn deserialize<'de, D, M: Unsigned, N: Unsigned>( - deserializer: D, - ) -> Result, D::Error> - where - D: Deserializer<'de>, - { - let visitor: ListOfBytesListVisitor = <_>::default(); - deserializer.deserialize_any(visitor) - } -} diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index a2c40ceb33..719db74c54 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,8 +1,11 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. use crate::engine_api::{ - EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, + Builder, EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, + PayloadId, }; +use crate::{BuilderApi, HttpJsonRpc}; +use async_trait::async_trait; use futures::future::join_all; use lru::LruCache; use slog::{crit, debug, info, warn, Logger}; @@ -58,14 +61,14 @@ struct PayloadIdCacheKey { /// An execution engine. pub struct Engine { pub id: String, - pub api: T, + pub api: HttpJsonRpc, payload_id_cache: Mutex>, state: RwLock, } impl Engine { /// Creates a new, offline engine. 
- pub fn new(id: String, api: T) -> Self { + pub fn new(id: String, api: HttpJsonRpc) -> Self { Self { id, api, @@ -94,8 +97,9 @@ impl Engine { } } -impl Engine { - pub async fn notify_forkchoice_updated( +#[async_trait] +impl Builder for Engine { + async fn notify_forkchoice_updated( &self, forkchoice_state: ForkChoiceState, payload_attributes: Option, @@ -124,14 +128,47 @@ impl Engine { } } +#[async_trait] +impl Builder for Engine { + async fn notify_forkchoice_updated( + &self, + forkchoice_state: ForkChoiceState, + pa: Option, + log: &Logger, + ) -> Result { + let payload_attributes = pa.ok_or(EngineApiError::InvalidBuilderQuery)?; + let response = self + .api + .forkchoice_updated_v1(forkchoice_state, Some(payload_attributes)) + .await?; + + if let Some(payload_id) = response.payload_id { + let key = PayloadIdCacheKey::new(&forkchoice_state, &payload_attributes); + self.payload_id_cache.lock().await.put(key, payload_id); + } else { + warn!( + log, + "Builder should have returned a payload_id for attributes {:?}", payload_attributes + ); + } + + Ok(response) + } +} + /// Holds multiple execution engines and provides functionality for managing them in a fallback /// manner. 
-pub struct Engines { - pub engines: Vec>, +pub struct Engines { + pub engines: Vec>, pub latest_forkchoice_state: RwLock>, pub log: Logger, } +pub struct Builders { + pub builders: Vec>, + pub log: Logger, +} + #[derive(Debug)] pub enum EngineError { Offline { id: String }, @@ -139,7 +176,7 @@ pub enum EngineError { Auth { id: String }, } -impl Engines { +impl Engines { async fn get_latest_forkchoice_state(&self) -> Option { *self.latest_forkchoice_state.read().await } @@ -148,7 +185,7 @@ impl Engines { *self.latest_forkchoice_state.write().await = Some(state); } - async fn send_latest_forkchoice_state(&self, engine: &Engine) { + async fn send_latest_forkchoice_state(&self, engine: &Engine) { let latest_forkchoice_state = self.get_latest_forkchoice_state().await; if let Some(forkchoice_state) = latest_forkchoice_state { @@ -286,7 +323,7 @@ impl Engines { /// it runs, it will try to upcheck all offline nodes and then run the function again. pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result> where - F: Fn(&'a Engine) -> G + Copy, + F: Fn(&'a Engine) -> G + Copy, G: Future>, { match self.first_success_without_retry(func).await { @@ -308,12 +345,12 @@ impl Engines { /// Run `func` on all engines, in the order in which they are defined, returning the first /// successful result that is found. - async fn first_success_without_retry<'a, F, G, H>( + pub async fn first_success_without_retry<'a, F, G, H>( &'a self, func: F, ) -> Result> where - F: Fn(&'a Engine) -> G, + F: Fn(&'a Engine) -> G, G: Future>, { let mut errors = vec![]; @@ -364,7 +401,7 @@ impl Engines { /// it runs, it will try to upcheck all offline nodes and then run the function again. 
pub async fn broadcast<'a, F, G, H>(&'a self, func: F) -> Vec> where - F: Fn(&'a Engine) -> G + Copy, + F: Fn(&'a Engine) -> G + Copy, G: Future>, { let first_results = self.broadcast_without_retry(func).await; @@ -392,25 +429,29 @@ impl Engines { func: F, ) -> Vec> where - F: Fn(&'a Engine) -> G, + F: Fn(&'a Engine) -> G, G: Future>, { let func = &func; let futures = self.engines.iter().map(|engine| async move { let is_offline = *engine.state.read().await == EngineState::Offline; if !is_offline { - func(engine).await.map_err(|error| { - debug!( - self.log, - "Execution engine call failed"; - "error" => ?error, - "id" => &engine.id - ); - EngineError::Api { - id: engine.id.clone(), - error, + match func(engine).await { + Ok(res) => Ok(res), + Err(error) => { + debug!( + self.log, + "Execution engine call failed"; + "error" => ?error, + "id" => &engine.id + ); + *engine.state.write().await = EngineState::Offline; + Err(EngineError::Api { + id: engine.id.clone(), + error, + }) } - }) + } } else { Err(EngineError::Offline { id: engine.id.clone(), @@ -422,6 +463,66 @@ impl Engines { } } +impl Builders { + pub async fn first_success_without_retry<'a, F, G, H>( + &'a self, + func: F, + ) -> Result> + where + F: Fn(&'a Engine) -> G, + G: Future>, + { + let mut errors = vec![]; + + for builder in &self.builders { + match func(builder).await { + Ok(result) => return Ok(result), + Err(error) => { + debug!( + self.log, + "Builder call failed"; + "error" => ?error, + "id" => &builder.id + ); + errors.push(EngineError::Api { + id: builder.id.clone(), + error, + }) + } + } + } + + Err(errors) + } + + pub async fn broadcast_without_retry<'a, F, G, H>( + &'a self, + func: F, + ) -> Vec> + where + F: Fn(&'a Engine) -> G, + G: Future>, + { + let func = &func; + let futures = self.builders.iter().map(|engine| async move { + func(engine).await.map_err(|error| { + debug!( + self.log, + "Builder call failed"; + "error" => ?error, + "id" => &engine.id + ); + EngineError::Api { + id: 
engine.id.clone(), + error, + } + }) + }); + + join_all(futures).await + } +} + impl PayloadIdCacheKey { fn new(state: &ForkChoiceState, attributes: &PayloadAttributes) -> Self { Self { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index ba4208d88a..5aa4edd74a 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,16 +4,23 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. +use crate::engine_api::Builder; +use crate::engines::Builders; use auth::{Auth, JwtKey}; -use engine_api::{Error as ApiError, *}; -use engines::{Engine, EngineError, Engines, ForkChoiceState, Logging}; +use engine_api::Error as ApiError; +pub use engine_api::*; +pub use engine_api::{http, http::HttpJsonRpc}; +pub use engines::ForkChoiceState; +use engines::{Engine, EngineError, Engines, Logging}; use lru::LruCache; use payload_status::process_multiple_payload_statuses; +pub use payload_status::PayloadStatus; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, trace, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; +use std::convert::TryInto; use std::future::Future; use std::io::Write; use std::path::PathBuf; @@ -24,12 +31,10 @@ use tokio::{ sync::{Mutex, MutexGuard, RwLock}, time::{sleep, sleep_until, Instant}, }; -use types::{ChainSpec, Epoch, ExecutionBlockHash, ProposerPreparationData, Slot}; - -pub use engine_api::{ - http::HttpJsonRpc, json_structures, PayloadAttributes, PayloadStatusV1Status, +use types::{ + BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, + ProposerPreparationData, SignedBeaconBlock, Slot, }; -pub use payload_status::PayloadStatus; mod engine_api; mod engines; @@ -59,6 +64,7 @@ const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); 
#[derive(Debug)] pub enum Error { NoEngines, + NoPayloadBuilder, ApiError(ApiError), EngineErrors(Vec), NotSynced, @@ -94,7 +100,8 @@ pub struct Proposer { } struct Inner { - engines: Engines, + engines: Engines, + builders: Builders, execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option
, proposer_preparation_data: Mutex>, @@ -108,6 +115,8 @@ struct Inner { pub struct Config { /// Endpoint urls for EL nodes that are running the engine api. pub execution_endpoints: Vec, + /// Endpoint urls for services providing the builder api. + pub builder_endpoints: Vec, /// JWT secrets for the above endpoints running the engine api. pub secret_files: Vec, /// The default fee recipient to use on the beacon node if none if provided from @@ -148,6 +157,7 @@ impl ExecutionLayer { pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { let Config { execution_endpoints: urls, + builder_endpoints: builder_urls, mut secret_files, suggested_fee_recipient, jwt_id, @@ -177,7 +187,7 @@ impl ExecutionLayer { }) .and_then(|ref s| { let secret = JwtKey::from_slice( - &hex::decode(strip_prefix(s)) + &hex::decode(strip_prefix(s.trim_end())) .map_err(|e| format!("Invalid hex string: {:?}", e))?, )?; Ok((secret, p.to_path_buf())) @@ -203,15 +213,24 @@ impl ExecutionLayer { .collect::>() .map_err(Error::InvalidJWTSecret)?; - let engines: Vec> = urls + let engines: Vec> = urls .into_iter() .zip(secrets.into_iter()) .map(|(url, (secret, path))| { let id = url.to_string(); let auth = Auth::new(secret, jwt_id.clone(), jwt_version.clone()); debug!(log, "Loaded execution endpoint"; "endpoint" => %id, "jwt_path" => ?path); - let api = HttpJsonRpc::new_with_auth(url, auth)?; - Ok(Engine::new(id, api)) + let api = HttpJsonRpc::::new_with_auth(url, auth)?; + Ok(Engine::::new(id, api)) + }) + .collect::>()?; + + let builders: Vec> = builder_urls + .into_iter() + .map(|url| { + let id = url.to_string(); + let api = HttpJsonRpc::::new(url)?; + Ok(Engine::::new(id, api)) }) .collect::>()?; @@ -221,6 +240,10 @@ impl ExecutionLayer { latest_forkchoice_state: <_>::default(), log: log.clone(), }, + builders: Builders { + builders, + log: log.clone(), + }, execution_engine_forkchoice_lock: <_>::default(), suggested_fee_recipient, proposer_preparation_data: 
Mutex::new(HashMap::new()), @@ -237,11 +260,15 @@ impl ExecutionLayer { } impl ExecutionLayer { - fn engines(&self) -> &Engines { + fn engines(&self) -> &Engines { &self.inner.engines } - fn executor(&self) -> &TaskExecutor { + fn builders(&self) -> &Builders { + &self.inner.builders + } + + pub fn executor(&self) -> &TaskExecutor { &self.inner.executor } @@ -277,11 +304,7 @@ impl ExecutionLayer { T: Fn(&'a Self) -> U, U: Future>, { - let runtime = self - .executor() - .runtime() - .upgrade() - .ok_or(Error::ShuttingDown)?; + let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; // TODO(merge): respect the shutdown signal. runtime.block_on(generate_future(self)) } @@ -295,11 +318,7 @@ impl ExecutionLayer { T: Fn(&'a Self) -> U, U: Future, { - let runtime = self - .executor() - .runtime() - .upgrade() - .ok_or(Error::ShuttingDown)?; + let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; // TODO(merge): respect the shutdown signal. Ok(runtime.block_on(generate_future(self))) } @@ -542,14 +561,14 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. 
- pub async fn get_payload( + pub async fn get_payload>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, finalized_block_hash: ExecutionBlockHash, proposer_index: u64, - ) -> Result, Error> { + ) -> Result { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metrics::GET_PAYLOAD], @@ -557,72 +576,124 @@ impl ExecutionLayer { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; - debug!( - self.log(), - "Issuing engine_getPayload"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, - ); - self.engines() - .first_success(|engine| async move { - let payload_id = if let Some(id) = engine - .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) + match Payload::block_type() { + BlockType::Blinded => { + debug!( + self.log(), + "Issuing builder_getPayloadHeader"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + self.builders() + .first_success_without_retry(|engine| async move { + let payload_id = engine + .get_payload_id( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + ) + .await + .ok_or(ApiError::MissingPayloadId { + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + })?; + engine + .api + .get_payload_header_v1::(payload_id) + .await? + .try_into() + .map_err(|_| ApiError::PayloadConversionLogicFlaw) + }) .await - { - // The payload id has been cached for this engine. - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, - &[metrics::HIT], - ); - id - } else { - // The payload id has *not* been cached for this engine. Trigger an artificial - // fork choice update to retrieve a payload ID. 
- // - // TODO(merge): a better algorithm might try to favour a node that already had a - // cached payload id, since a payload that has had more time to produce is - // likely to be more profitable. - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, - &[metrics::MISS], - ); - let fork_choice_state = ForkChoiceState { - head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash, - }; - let payload_attributes = PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient, - }; - - engine - .notify_forkchoice_updated( - fork_choice_state, - Some(payload_attributes), - self.log(), - ) - .await - .map(|response| response.payload_id)? - .ok_or_else(|| { - error!( - self.log(), - "Exec engine unable to produce payload"; - "msg" => "No payload ID, the engine is likely syncing. \ - This has the potential to cause a missed block proposal.", + .map_err(Error::EngineErrors) + } + BlockType::Full => { + debug!( + self.log(), + "Issuing engine_getPayload"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + self.engines() + .first_success(|engine| async move { + let payload_id = if let Some(id) = engine + .get_payload_id( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + ) + .await + { + // The payload id has been cached for this engine. + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, + &[metrics::HIT], ); + id + } else { + // The payload id has *not* been cached for this engine. Trigger an artificial + // fork choice update to retrieve a payload ID. + // + // TODO(merge): a better algorithm might try to favour a node that already had a + // cached payload id, since a payload that has had more time to produce is + // likely to be more profitable. 
+ metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, + &[metrics::MISS], + ); + let fork_choice_state = ForkChoiceState { + head_block_hash: parent_hash, + safe_block_hash: parent_hash, + finalized_block_hash, + }; + let payload_attributes = PayloadAttributes { + timestamp, + prev_randao, + suggested_fee_recipient, + }; - ApiError::PayloadIdUnavailable - })? - }; + let response = engine + .notify_forkchoice_updated( + fork_choice_state, + Some(payload_attributes), + self.log(), + ) + .await?; - engine.api.get_payload_v1(payload_id).await - }) - .await - .map_err(Error::EngineErrors) + match response.payload_id { + Some(payload_id) => payload_id, + None => { + error!( + self.log(), + "Exec engine unable to produce payload"; + "msg" => "No payload ID, the engine is likely syncing. \ + This has the potential to cause a missed block \ + proposal.", + "status" => ?response.payload_status + ); + return Err(ApiError::PayloadIdUnavailable); + } + } + }; + + engine + .api + .get_payload_v1::(payload_id) + .await + .map(Into::into) + }) + .await + .map_err(Error::EngineErrors) + } + } } /// Maps to the `engine_newPayload` JSON-RPC call. @@ -801,10 +872,23 @@ impl ExecutionLayer { }) .await; + // Only query builders with payload attributes populated. 
+ let builder_broadcast_results = if payload_attributes.is_some() { + self.builders() + .broadcast_without_retry(|engine| async move { + engine + .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) + .await + }) + .await + } else { + vec![] + }; process_multiple_payload_statuses( head_block_hash, broadcast_results .into_iter() + .chain(builder_broadcast_results.into_iter()) .map(|result| result.map(|response| response.payload_status)), self.log(), ) @@ -931,7 +1015,7 @@ impl ExecutionLayer { /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md async fn get_pow_block_hash_at_total_difficulty( &self, - engine: &Engine, + engine: &Engine, spec: &ChainSpec, ) -> Result, ApiError> { let mut block = engine @@ -1013,7 +1097,6 @@ impl ExecutionLayer { )); } } - Ok(None) }) .await; @@ -1076,7 +1159,7 @@ impl ExecutionLayer { /// https://github.com/ethereum/consensus-specs/issues/2636 async fn get_pow_block( &self, - engine: &Engine, + engine: &Engine, hash: ExecutionBlockHash, ) -> Result, ApiError> { if let Some(cached) = self.execution_blocks().await.get(&hash).copied() { @@ -1094,19 +1177,96 @@ impl ExecutionLayer { Ok(None) } } + + pub async fn get_payload_by_block_hash( + &self, + hash: ExecutionBlockHash, + ) -> Result>, Error> { + self.engines() + .first_success(|engine| async move { + self.get_payload_by_block_hash_from_engine(engine, hash) + .await + }) + .await + .map_err(Error::EngineErrors) + } + + async fn get_payload_by_block_hash_from_engine( + &self, + engine: &Engine, + hash: ExecutionBlockHash, + ) -> Result>, ApiError> { + let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); + + if hash == ExecutionBlockHash::zero() { + return Ok(Some(ExecutionPayload::default())); + } + + let block = if let Some(block) = engine.api.get_block_by_hash_with_txns::(hash).await? 
{ + block + } else { + return Ok(None); + }; + + let transactions = VariableList::new( + block + .transactions + .into_iter() + .map(|transaction| VariableList::new(transaction.rlp().to_vec())) + .collect::>() + .map_err(ApiError::DeserializeTransaction)?, + ) + .map_err(ApiError::DeserializeTransactions)?; + + Ok(Some(ExecutionPayload { + parent_hash: block.parent_hash, + fee_recipient: block.fee_recipient, + state_root: block.state_root, + receipts_root: block.receipts_root, + logs_bloom: block.logs_bloom, + prev_randao: block.prev_randao, + block_number: block.block_number, + gas_limit: block.gas_limit, + gas_used: block.gas_used, + timestamp: block.timestamp, + extra_data: block.extra_data, + base_fee_per_gas: block.base_fee_per_gas, + block_hash: block.block_hash, + transactions, + })) + } + + pub async fn propose_blinded_beacon_block( + &self, + block: &SignedBeaconBlock>, + ) -> Result, Error> { + debug!( + self.log(), + "Issuing builder_proposeBlindedBlock"; + "root" => ?block.canonical_root(), + ); + self.builders() + .first_success_without_retry(|engine| async move { + engine.api.propose_blinded_block_v1(block.clone()).await + }) + .await + .map_err(Error::EngineErrors) + } } #[cfg(test)] mod test { use super::*; use crate::test_utils::MockExecutionLayer as GenericMockExecutionLayer; + use task_executor::test_utils::TestRuntime; use types::MainnetEthSpec; type MockExecutionLayer = GenericMockExecutionLayer; #[tokio::test] async fn produce_three_valid_pos_execution_blocks() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .produce_valid_execution_payload_on_head() .await @@ -1118,7 +1278,8 @@ mod test { #[tokio::test] async fn finds_valid_terminal_block_hash() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) 
.move_to_block_prior_to_terminal_block() .with_terminal_block(|spec, el, _| async move { el.engines().upcheck_not_synced(Logging::Disabled).await; @@ -1137,7 +1298,8 @@ mod test { #[tokio::test] async fn verifies_valid_terminal_block_hash() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { el.engines().upcheck_not_synced(Logging::Disabled).await; @@ -1153,7 +1315,8 @@ mod test { #[tokio::test] async fn rejects_invalid_terminal_block_hash() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { el.engines().upcheck_not_synced(Logging::Disabled).await; @@ -1171,7 +1334,8 @@ mod test { #[tokio::test] async fn rejects_unknown_terminal_block_hash() { - MockExecutionLayer::default_params() + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, _| async move { el.engines().upcheck_not_synced(Logging::Disabled).await; diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 4a761c8e46..356c5a46dd 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -31,4 +31,8 @@ lazy_static::lazy_static! 
{ "Indicates hits or misses for already having prepared a payload id before payload production", &["event"] ); + pub static ref EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH: Result = try_create_histogram( + "execution_layer_get_payload_by_block_hash_time", + "Time to reconstruct a payload from the EE using eth_getBlockByHash" + ); } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 61038f40af..772ac3c866 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -100,7 +100,9 @@ pub async fn handle_rpc( let forkchoice_state: JsonForkChoiceStateV1 = get_param(params, 0)?; let payload_attributes: Option = get_param(params, 1)?; - let response = ctx + let head_block_hash = forkchoice_state.head_block_hash; + + let mut response = ctx .execution_block_generator .write() .forkchoice_updated_v1( @@ -108,6 +110,14 @@ pub async fn handle_rpc( payload_attributes.map(|json| json.into()), )?; + if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() { + if status.status == PayloadStatusV1Status::Valid { + status.latest_valid_hash = Some(head_block_hash) + } + + response.payload_status = status.into(); + } + Ok(serde_json::to_value(response).unwrap()) } other => Err(format!( diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index cf8c8516f6..5770a8a382 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -2,61 +2,22 @@ use crate::{ test_utils::{MockServer, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, JWT_SECRET}, Config, *, }; -use environment::null_logger; use sensitive_url::SensitiveUrl; -use std::sync::Arc; use task_executor::TaskExecutor; use tempfile::NamedTempFile; -use types::{Address, 
ChainSpec, Epoch, EthSpec, Hash256, Uint256}; - -pub struct ExecutionLayerRuntime { - pub runtime: Option>, - pub _runtime_shutdown: exit_future::Signal, - pub task_executor: TaskExecutor, - pub log: Logger, -} - -impl Default for ExecutionLayerRuntime { - fn default() -> Self { - let runtime = Arc::new( - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .unwrap(), - ); - let (runtime_shutdown, exit) = exit_future::signal(); - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let log = null_logger().unwrap(); - let task_executor = - TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); - - Self { - runtime: Some(runtime), - _runtime_shutdown: runtime_shutdown, - task_executor, - log, - } - } -} - -impl Drop for ExecutionLayerRuntime { - fn drop(&mut self) { - if let Some(runtime) = self.runtime.take() { - Arc::try_unwrap(runtime).unwrap().shutdown_background() - } - } -} +use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; pub struct MockExecutionLayer { pub server: MockServer, pub el: ExecutionLayer, - pub el_runtime: ExecutionLayerRuntime, + pub executor: TaskExecutor, pub spec: ChainSpec, } impl MockExecutionLayer { - pub fn default_params() -> Self { + pub fn default_params(executor: TaskExecutor) -> Self { Self::new( + executor, DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), @@ -65,13 +26,13 @@ impl MockExecutionLayer { } pub fn new( + executor: TaskExecutor, terminal_total_difficulty: Uint256, terminal_block: u64, terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, ) -> Self { - let el_runtime = ExecutionLayerRuntime::default(); - let handle = el_runtime.runtime.as_ref().unwrap().handle(); + let handle = executor.handle().unwrap(); let mut spec = T::default_spec(); spec.terminal_total_difficulty = terminal_total_difficulty; @@ -79,7 +40,7 @@ impl MockExecutionLayer { 
spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch; let server = MockServer::new( - handle, + &handle, terminal_total_difficulty, terminal_block, terminal_block_hash, @@ -97,17 +58,13 @@ impl MockExecutionLayer { suggested_fee_recipient: Some(Address::repeat_byte(42)), ..Default::default() }; - let el = ExecutionLayer::from_config( - config, - el_runtime.task_executor.clone(), - el_runtime.log.clone(), - ) - .unwrap(); + let el = + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); Self { server, el, - el_runtime, + executor, spec, } } @@ -154,7 +111,7 @@ impl MockExecutionLayer { let validator_index = 0; let payload = self .el - .get_payload::( + .get_payload::>( parent_hash, timestamp, prev_randao, @@ -162,7 +119,8 @@ impl MockExecutionLayer { validator_index, ) .await - .unwrap(); + .unwrap() + .execution_payload; let block_hash = payload.block_hash; assert_eq!(payload.parent_hash, parent_hash); assert_eq!(payload.block_number, block_number); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 99adfa6558..805f6716fb 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,7 +22,7 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; pub use execution_block_generator::{generate_pow_block, ExecutionBlockGenerator}; -pub use mock_execution_layer::{ExecutionLayerRuntime, MockExecutionLayer}; +pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; @@ -68,6 +68,7 @@ impl MockServer { previous_request: <_>::default(), preloaded_responses, static_new_payload_response: <_>::default(), + static_forkchoice_updated_response: <_>::default(), _phantom: PhantomData, }); @@ -125,48 +126,156 @@ impl MockServer { 
self.ctx.previous_request.lock().take() } - pub fn all_payloads_valid(&self) { - let response = StaticNewPayloadResponse { - status: PayloadStatusV1 { - status: PayloadStatusV1Status::Valid, - latest_valid_hash: None, - validation_error: None, - }, - should_import: true, - }; + pub fn set_new_payload_response(&self, response: StaticNewPayloadResponse) { *self.ctx.static_new_payload_response.lock() = Some(response) } + pub fn set_forkchoice_updated_response(&self, status: PayloadStatusV1) { + *self.ctx.static_forkchoice_updated_response.lock() = Some(status); + } + + fn valid_status() -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: None, + validation_error: None, + } + } + + fn valid_new_payload_response() -> StaticNewPayloadResponse { + StaticNewPayloadResponse { + status: Self::valid_status(), + should_import: true, + } + } + + fn syncing_status() -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::Syncing, + latest_valid_hash: None, + validation_error: None, + } + } + + fn syncing_new_payload_response(should_import: bool) -> StaticNewPayloadResponse { + StaticNewPayloadResponse { + status: Self::syncing_status(), + should_import, + } + } + + fn invalid_status(latest_valid_hash: ExecutionBlockHash) -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::Invalid, + latest_valid_hash: Some(latest_valid_hash), + validation_error: Some("static response".into()), + } + } + + fn invalid_new_payload_response( + latest_valid_hash: ExecutionBlockHash, + ) -> StaticNewPayloadResponse { + StaticNewPayloadResponse { + status: Self::invalid_status(latest_valid_hash), + should_import: true, + } + } + + fn invalid_block_hash_status() -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::InvalidBlockHash, + latest_valid_hash: None, + validation_error: Some("static response".into()), + } + } + + fn invalid_block_hash_new_payload_response() -> StaticNewPayloadResponse { 
+ StaticNewPayloadResponse { + status: Self::invalid_block_hash_status(), + should_import: true, + } + } + + fn invalid_terminal_block_status() -> PayloadStatusV1 { + PayloadStatusV1 { + status: PayloadStatusV1Status::InvalidTerminalBlock, + latest_valid_hash: None, + validation_error: Some("static response".into()), + } + } + + fn invalid_terminal_block_new_payload_response() -> StaticNewPayloadResponse { + StaticNewPayloadResponse { + status: Self::invalid_terminal_block_status(), + should_import: true, + } + } + + pub fn all_payloads_valid(&self) { + self.all_payloads_valid_on_new_payload(); + self.all_payloads_valid_on_forkchoice_updated(); + } + + pub fn all_payloads_valid_on_new_payload(&self) { + self.set_new_payload_response(Self::valid_new_payload_response()); + } + + pub fn all_payloads_valid_on_forkchoice_updated(&self) { + self.set_forkchoice_updated_response(Self::valid_status()); + } + /// Setting `should_import = true` simulates an EE that initially returns `SYNCING` but obtains - /// the block via it's own means (e.g., devp2p). + /// the block via its own means (e.g., devp2p). 
pub fn all_payloads_syncing(&self, should_import: bool) { - let response = StaticNewPayloadResponse { - status: PayloadStatusV1 { - status: PayloadStatusV1Status::Syncing, - latest_valid_hash: None, - validation_error: None, - }, - should_import, - }; - *self.ctx.static_new_payload_response.lock() = Some(response) + self.all_payloads_syncing_on_new_payload(should_import); + self.all_payloads_syncing_on_forkchoice_updated(); + } + + pub fn all_payloads_syncing_on_new_payload(&self, should_import: bool) { + self.set_new_payload_response(Self::syncing_new_payload_response(should_import)); + } + + pub fn all_payloads_syncing_on_forkchoice_updated(&self) { + self.set_forkchoice_updated_response(Self::syncing_status()); } pub fn all_payloads_invalid(&self, latest_valid_hash: ExecutionBlockHash) { - let response = StaticNewPayloadResponse { - status: PayloadStatusV1 { - status: PayloadStatusV1Status::Invalid, - latest_valid_hash: Some(latest_valid_hash), - validation_error: Some("static response".into()), - }, - should_import: true, - }; - *self.ctx.static_new_payload_response.lock() = Some(response) + self.all_payloads_invalid_on_new_payload(latest_valid_hash); + self.all_payloads_invalid_on_forkchoice_updated(latest_valid_hash); } - /// Disables any static payload response so the execution block generator will do its own + pub fn all_payloads_invalid_on_new_payload(&self, latest_valid_hash: ExecutionBlockHash) { + self.set_new_payload_response(Self::invalid_new_payload_response(latest_valid_hash)); + } + + pub fn all_payloads_invalid_on_forkchoice_updated( + &self, + latest_valid_hash: ExecutionBlockHash, + ) { + self.set_forkchoice_updated_response(Self::invalid_status(latest_valid_hash)); + } + + pub fn all_payloads_invalid_block_hash_on_new_payload(&self) { + self.set_new_payload_response(Self::invalid_block_hash_new_payload_response()); + } + + pub fn all_payloads_invalid_block_hash_on_forkchoice_updated(&self) { + 
self.set_forkchoice_updated_response(Self::invalid_block_hash_status()); + } + + pub fn all_payloads_invalid_terminal_block_on_new_payload(&self) { + self.set_new_payload_response(Self::invalid_terminal_block_new_payload_response()); + } + + pub fn all_payloads_invalid_terminal_block_on_forkchoice_updated(&self) { + self.set_forkchoice_updated_response(Self::invalid_terminal_block_status()); + } + + /// Disables any static payload responses so the execution block generator will do its own /// verification. pub fn full_payload_verification(&self) { - *self.ctx.static_new_payload_response.lock() = None + *self.ctx.static_new_payload_response.lock() = None; + *self.ctx.static_forkchoice_updated_response.lock() = None; } pub fn insert_pow_block( @@ -248,6 +357,7 @@ pub struct Context { pub preloaded_responses: Arc>>, pub previous_request: Arc>>, pub static_new_payload_response: Arc>>, + pub static_forkchoice_updated_response: Arc>>, pub _phantom: PhantomData, } diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 3a19fe0f21..87c56d360b 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -17,7 +17,7 @@ rayon = "1.4.1" state_processing = { path = "../../consensus/state_processing" } merkle_proof = { path = "../../consensus/merkle_proof" } eth2_ssz = "0.4.1" -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" tree_hash = "0.4.1" tokio = { version = "1.14.0", features = ["full"] } slog = "2.5.2" diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 7a23d128bd..a34618c2ef 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -27,8 +27,10 @@ slot_clock = { path = "../../common/slot_clock" } eth2_ssz = "0.4.1" bs58 = "0.4.0" futures = "0.3.8" -parking_lot = "0.11.0" +execution_layer = {path = "../execution_layer"} +parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} +task_executor = { path = "../../common/task_executor" } 
[dev-dependencies] @@ -36,6 +38,7 @@ store = { path = "../store" } environment = { path = "../../lighthouse/environment" } tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } +logging = { path = "../../common/logging" } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 5cd9894ade..2b4543656d 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -7,7 +7,7 @@ use state_processing::{ per_epoch_processing::EpochProcessingSummary, BlockReplayError, BlockReplayer, }; use std::sync::Arc; -use types::{BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock}; +use types::{BeaconState, BeaconStateError, EthSpec, Hash256}; use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; const MAX_REQUEST_RANGE_EPOCHS: usize = 100; @@ -112,7 +112,7 @@ pub fn get_attestation_performance( ) })?; let first_block = chain - .get_block(first_block_root) + .get_blinded_block(first_block_root) .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) }) @@ -120,7 +120,7 @@ pub fn get_attestation_performance( // Load the block of the prior slot which will be used to build the starting state. 
let prior_block = chain - .get_block(&first_block.parent_root()) + .get_blinded_block(&first_block.parent_root()) .and_then(|maybe_block| { maybe_block .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root())) @@ -197,13 +197,13 @@ pub fn get_attestation_performance( .iter() .map(|root| { chain - .get_block(root) + .get_blinded_block(root) .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) }) .map_err(beacon_chain_error) }) - .collect::>, _>>()?; + .collect::, _>>()?; replayer = replayer .apply_blocks(blocks, None) diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index c21701f3a3..727215bfca 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,7 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlockId as CoreBlockId; use std::str::FromStr; -use types::{Hash256, SignedBeaconBlock, Slot}; +use types::{BlindedPayload, Hash256, SignedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. @@ -52,7 +52,55 @@ impl BlockId { } /// Return the `SignedBeaconBlock` identified by `self`. 
- pub fn block( + pub fn blinded_block( + &self, + chain: &BeaconChain, + ) -> Result>, warp::Rejection> { + match &self.0 { + CoreBlockId::Head => chain + .head_beacon_block() + .map(Into::into) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Slot(slot) => { + let root = self.root(chain)?; + chain + .get_blinded_block(&root) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|block_opt| match block_opt { + Some(block) => { + if block.slot() != *slot { + return Err(warp_utils::reject::custom_not_found(format!( + "slot {} was skipped", + slot + ))); + } + Ok(block) + } + None => Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))), + }) + } + _ => { + let root = self.root(chain)?; + chain + .get_blinded_block(&root) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + }) + }) + } + } + } + + /// Return the `SignedBeaconBlock` identified by `self`. 
+ pub async fn full_block( &self, chain: &BeaconChain, ) -> Result, warp::Rejection> { @@ -64,6 +112,7 @@ impl BlockId { let root = self.root(chain)?; chain .get_block(&root) + .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|block_opt| match block_opt { Some(block) => { @@ -85,6 +134,7 @@ impl BlockId { let root = self.root(chain)?; chain .get_block(&root) + .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|root_opt| { root_opt.ok_or_else(|| { diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index d948c0d7d8..1b924f3828 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -10,8 +10,8 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::sync::Arc; use types::{ - BeaconCommittee, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Hash256, - OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot, + BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, Epoch, EthSpec, + Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot, }; use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; @@ -104,7 +104,7 @@ impl PackingEfficiencyHandler { fn apply_block( &mut self, - block: &SignedBeaconBlock, + block: &SignedBeaconBlock>, ) -> Result { let block_body = block.message().body(); let attestations = block_body.attestations(); @@ -251,7 +251,7 @@ pub fn get_block_packing_efficiency( .ok_or_else(|| custom_server_error("no blocks were loaded".to_string()))?; let first_block = chain - .get_block(first_block_root) + .get_blinded_block(first_block_root) .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) }) @@ -309,7 +309,7 @@ pub fn get_block_packing_efficiency( }; let pre_block_hook = |_state: &mut BeaconState, - block: &SignedBeaconBlock| + 
block: &SignedBeaconBlock<_, BlindedPayload<_>>| -> Result<(), PackingEfficiencyError> { let slot = block.slot(); @@ -363,13 +363,13 @@ pub fn get_block_packing_efficiency( .iter() .map(|root| { chain - .get_block(root) + .get_blinded_block(root) .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) }) .map_err(beacon_chain_error) }) - .collect::>, _>>()?; + .collect::, _>>()?; replayer = replayer .apply_blocks(blocks, None) diff --git a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index e911883349..014db8a602 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -2,17 +2,19 @@ use beacon_chain::store::{metadata::CURRENT_SCHEMA_VERSION, AnchorInfo}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::lighthouse::DatabaseInfo; use std::sync::Arc; -use types::SignedBeaconBlock; +use types::SignedBlindedBeaconBlock; pub fn info( chain: Arc>, ) -> Result { let store = &chain.store; let split = store.get_split_info(); + let config = store.get_config().clone(); let anchor = store.get_anchor_info(); Ok(DatabaseInfo { schema_version: CURRENT_SCHEMA_VERSION.as_u64(), + config, split, anchor, }) @@ -20,10 +22,10 @@ pub fn info( pub fn historical_blocks( chain: Arc>, - blocks: Vec>, + blocks: Vec>, ) -> Result { chain - .import_historical_block_batch(&blocks) + .import_historical_block_batch(blocks) .map_err(warp_utils::reject::beacon_chain_error)?; let anchor = chain.store.get_anchor_info().ok_or_else(|| { diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6f1bf0ee13..166ec9147f 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -45,10 +45,12 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttesterSlashing, BeaconStateError, CommitteeCache, ConfigAndPreset, Epoch, - EthSpec, ForkName, 
ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, - SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedVoluntaryExit, - Slot, SyncCommitteeMessage, SyncContributionData, + Attestation, AttesterSlashing, BeaconBlockBodyMerge, BeaconBlockMerge, BeaconStateError, + BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, + ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, + SignedBeaconBlock, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, + SignedContributionAndProof, SignedVoluntaryExit, Slot, SyncCommitteeMessage, + SyncContributionData, }; use version::{ add_consensus_version_header, fork_versioned_response, inconsistent_fork_rejection, @@ -825,10 +827,10 @@ pub fn serve( (None, None) => chain .head_beacon_block() .map_err(warp_utils::reject::beacon_chain_error) - .map(|block| (block.canonical_root(), block))?, + .map(|block| (block.canonical_root(), block.into()))?, // Only the parent root parameter, do a forwards-iterator lookup. (None, Some(parent_root)) => { - let parent = BlockId::from_root(parent_root).block(&chain)?; + let parent = BlockId::from_root(parent_root).blinded_block(&chain)?; let (root, _slot) = chain .forwards_iter_block_roots(parent.slot()) .map_err(warp_utils::reject::beacon_chain_error)? @@ -846,14 +848,14 @@ pub fn serve( })?; BlockId::from_root(root) - .block(&chain) + .blinded_block(&chain) .map(|block| (root, block))? } // Slot is supplied, search by slot and optionally filter by // parent root. (Some(slot), parent_root_opt) => { let root = BlockId::from_slot(slot).root(&chain)?; - let block = BlockId::from_root(root).block(&chain)?; + let block = BlockId::from_root(root).blinded_block(&chain)?; // If the parent root was supplied, check that it matches the block // obtained via a slot lookup. 
@@ -898,7 +900,7 @@ pub fn serve( .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { let root = block_id.root(&chain)?; - let block = BlockId::from_root(root).block(&chain)?; + let block = BlockId::from_root(root).blinded_block(&chain)?; let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) @@ -1022,6 +1024,116 @@ pub fn serve( }, ); + /* + * beacon/blocks + */ + + // POST beacon/blocks + let post_beacon_blinded_blocks = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |block: SignedBeaconBlock>, + chain: Arc>, + network_tx: UnboundedSender>, + _log: Logger| { + blocking_json_task(move || { + if let Some(el) = chain.execution_layer.as_ref() { + //FIXME(sean): we may not always receive the payload in this response because it + // should be the relay's job to propogate the block. However, since this block is + // already signed and sent this might be ok (so long as the relay validates + // the block before revealing the payload). + + //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should + // be able to support the normal block proposal flow, because at some point full block endpoints + // will be deprecated from the beacon API. This will entail creating full blocks in + // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded + // blocks. We will access the payload of those blocks here. This flow should happen if the + // execution layer has no payload builders or if we have not yet finalized post-merge transition. 
+ let payload = el + .block_on(|el| el.propose_blinded_beacon_block(&block)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "proposal failed: {:?}", + e + )) + })?; + let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { + message: BeaconBlockMerge { + slot: block.message().slot(), + proposer_index: block.message().proposer_index(), + parent_root: block.message().parent_root(), + state_root: block.message().state_root(), + body: BeaconBlockBodyMerge { + randao_reveal: block.message().body().randao_reveal().clone(), + eth1_data: block.message().body().eth1_data().clone(), + graffiti: *block.message().body().graffiti(), + proposer_slashings: block + .message() + .body() + .proposer_slashings() + .clone(), + attester_slashings: block + .message() + .body() + .attester_slashings() + .clone(), + attestations: block.message().body().attestations().clone(), + deposits: block.message().body().deposits().clone(), + voluntary_exits: block + .message() + .body() + .voluntary_exits() + .clone(), + sync_aggregate: block + .message() + .body() + .sync_aggregate() + .unwrap() + .clone(), + execution_payload: payload.into(), + }, + }, + signature: block.signature().clone(), + }); + + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + publish_pubsub_message( + &network_tx, + PubsubMessage::BeaconBlock(Box::new(new_block.clone())), + )?; + + match chain.process_block(new_block) { + Ok(_) => { + // Update the head since it's likely this block will become the new + // head. 
+ chain + .fork_choice() + .map_err(warp_utils::reject::beacon_chain_error)?; + + Ok(()) + } + Err(e) => { + let msg = format!("{:?}", e); + + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } + } else { + Err(warp_utils::reject::custom_server_error( + "no execution layer found".to_string(), + )) + } + }) + }, + ); + let block_id_or_err = warp::path::param::().or_else(|_| async { Err(warp_utils::reject::custom_bad_request( "Invalid block ID".to_string(), @@ -1050,8 +1162,8 @@ pub fn serve( block_id: BlockId, chain: Arc>, accept_header: Option| { - blocking_task(move || { - let block = block_id.block(&chain)?; + async move { + let block = block_id.full_block(&chain).await?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1070,7 +1182,7 @@ pub fn serve( .map(|res| warp::reply::json(&res).into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) - }) + } }, ); @@ -1096,7 +1208,7 @@ pub fn serve( .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { block_id - .block(&chain) + .blinded_block(&chain) .map(|block| block.message().body().attestations().clone()) .map(api_types::GenericResponse::from) }) @@ -1899,7 +2011,69 @@ pub fn serve( }; let (block, _) = chain - .produce_block_with_verification( + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + fork_versioned_response(endpoint_version, fork_name, block) + }) + }, + ); + + // GET validator/blinded_blocks/{slot} + let get_validator_blinded_blocks = any_version + .and(warp::path("validator")) + .and(warp::path("blinded_blocks")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid slot".to_string(), + )) + })) + .and(warp::path::end()) + 
.and(not_while_syncing_filter.clone()) + .and(warp::query::()) + .and(chain_filter.clone()) + .and_then( + |endpoint_version: EndpointVersion, + slot: Slot, + query: api_types::ValidatorBlocksQuery, + chain: Arc>| { + blocking_json_task(move || { + let randao_reveal = query.randao_reveal.as_ref().map_or_else( + || { + if query.verify_randao { + Err(warp_utils::reject::custom_bad_request( + "randao_reveal is mandatory unless verify_randao=false".into(), + )) + } else { + Ok(Signature::empty()) + } + }, + |sig_bytes| { + sig_bytes.try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + }) + }, + )?; + + let randao_verification = if query.verify_randao { + ProduceBlockVerification::VerifyRandao + } else { + ProduceBlockVerification::NoVerification + }; + + let (block, _) = chain + .produce_block_with_verification::>( randao_reveal, slot, query.graffiti.map(Into::into), @@ -1965,6 +2139,12 @@ pub fn serve( query.slot, &query.attestation_data_root, ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch aggregate: {:?}", + e + )) + })? 
.map(api_types::GenericResponse::from) .ok_or_else(|| { warp_utils::reject::custom_not_found( @@ -2607,7 +2787,7 @@ pub fn serve( .and(chain_filter.clone()) .and(log_filter.clone()) .and_then( - |blocks: Vec>, + |blocks: Vec>, chain: Arc>, log: Logger| { info!( @@ -2766,6 +2946,7 @@ pub fn serve( .or(get_node_peer_count.boxed()) .or(get_validator_duties_proposer.boxed()) .or(get_validator_blocks.boxed()) + .or(get_validator_blinded_blocks.boxed()) .or(get_validator_attestation_data.boxed()) .or(get_validator_aggregate_attestation.boxed()) .or(get_validator_sync_committee_contribution.boxed()) @@ -2791,6 +2972,7 @@ pub fn serve( .or(warp::post().and( post_beacon_blocks .boxed() + .or(post_beacon_blinded_blocks.boxed()) .or(post_beacon_pool_attestations.boxed()) .or(post_beacon_pool_attester_slashings.boxed()) .or(post_beacon_pool_proposer_slashings.boxed()) diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 64ce3b6566..8b12aa4a5b 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,7 +1,9 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` use crate::common::*; +use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use eth2::types::DepositContractData; -use types::{EthSpec, MainnetEthSpec}; +use tree_hash::TreeHash; +use types::{EthSpec, FullPayload, MainnetEthSpec, Slot}; type E = MainnetEthSpec; @@ -30,3 +32,96 @@ async fn deposit_contract_custom_network() { assert_eq!(result, expected); } + +// Test that running fork choice before proposing results in selection of the correct head. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +pub async fn fork_choice_before_proposal() { + // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing + // `validator_count // 32`. 
+ let validator_count = 32; + let all_validators = (0..validator_count).collect::>(); + let num_initial: u64 = 31; + + let tester = InteractiveTester::::new(None, validator_count).await; + let harness = &tester.harness; + + // Create some chain depth. + harness.advance_slot(); + harness.extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + // We set up the following block graph, where B is a block that is temporarily orphaned by C, + // but is then reinstated and built upon by D. + // + // A | B | - | D | + // ^ | - | C | + let slot_a = Slot::new(num_initial); + let slot_b = slot_a + 1; + let slot_c = slot_a + 2; + let slot_d = slot_a + 3; + + let state_a = harness.get_current_state(); + let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b); + let block_root_b = harness.process_block(slot_b, block_b).unwrap(); + + // Create attestations to B but keep them in reserve until after C has been processed. + let attestations_b = harness.make_attestations( + &all_validators, + &state_b, + state_b.tree_hash_root(), + block_root_b, + slot_b, + ); + + let (block_c, state_c) = harness.make_block(state_a, slot_c); + let block_root_c = harness.process_block(slot_c, block_c.clone()).unwrap(); + + // Create attestations to C from a small number of validators and process them immediately. + let attestations_c = harness.make_attestations( + &all_validators[..validator_count / 2], + &state_c, + state_c.tree_hash_root(), + block_root_c, + slot_c, + ); + harness.process_attestations(attestations_c); + + // Apply the attestations to B, but don't re-run fork choice. + harness.process_attestations(attestations_b); + + // Due to proposer boost, the head should be C during slot C. + assert_eq!( + harness.chain.head_info().unwrap().block_root, + block_root_c.into() + ); + + // Ensure that building a block via the HTTP API re-runs fork choice and builds block D upon B. 
+ // Manually prod the per-slot task, because the slot timer doesn't run in the background in + // these tests. + harness.advance_slot(); + harness.chain.per_slot_task(); + + let proposer_index = state_b + .get_beacon_proposer_index(slot_d, &harness.chain.spec) + .unwrap(); + let randao_reveal = harness + .sign_randao_reveal(&state_b, proposer_index, slot_d) + .into(); + let block_d = tester + .client + .get_validator_blocks::>(slot_d, &randao_reveal, None) + .await + .unwrap() + .data; + + // Head is now B. + assert_eq!( + harness.chain.head_info().unwrap().block_root, + block_root_b.into() + ); + // D's parent is B. + assert_eq!(block_d.parent_root(), block_root_b.into()); +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index f3a1ccbf05..5f53a96156 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -20,6 +20,7 @@ use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; +use task_executor::test_utils::TestRuntime; use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use tree_hash::TreeHash; @@ -63,6 +64,7 @@ struct ApiTester { network_rx: mpsc::UnboundedReceiver>, local_enr: Enr, external_peer_id: PeerId, + _runtime: TestRuntime, } impl ApiTester { @@ -121,8 +123,7 @@ impl ApiTester { harness.chain.slot().unwrap(), ) .into_iter() - .map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) - .flatten() + .flat_map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) .collect::>(); assert!( @@ -186,7 +187,7 @@ impl ApiTester { external_peer_id, } = create_api_server(chain.clone(), log).await; - tokio::spawn(server); + harness.runtime.task_executor.spawn(server, "api_server"); let client = BeaconNodeHttpClient::new( SensitiveUrl::parse(&format!( @@ -213,6 +214,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + _runtime: harness.runtime, } } @@ -244,8 +246,7 @@ impl 
ApiTester { harness.chain.slot().unwrap(), ) .into_iter() - .map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) - .flatten() + .flat_map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) .collect::>(); let attester_slashing = harness.make_attester_slashing(vec![0, 1]); @@ -265,7 +266,7 @@ impl ApiTester { external_peer_id, } = create_api_server(chain.clone(), log).await; - tokio::spawn(server); + harness.runtime.task_executor.spawn(server, "api_server"); let client = BeaconNodeHttpClient::new( SensitiveUrl::parse(&format!( @@ -292,6 +293,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + _runtime: harness.runtime, } } @@ -762,9 +764,9 @@ impl ApiTester { } } - fn get_block(&self, block_id: BlockId) -> Option> { - let root = self.get_block_root(block_id); - root.and_then(|root| self.chain.get_block(&root).unwrap()) + async fn get_block(&self, block_id: BlockId) -> Option> { + let root = self.get_block_root(block_id)?; + self.chain.get_block(&root).await.unwrap() } pub async fn test_beacon_headers_all_slots(self) -> Self { @@ -859,7 +861,11 @@ impl ApiTester { } } - let block_opt = block_root_opt.and_then(|root| self.chain.get_block(&root).unwrap()); + let block_opt = if let Some(root) = block_root_opt { + self.chain.get_block(&root).await.unwrap() + } else { + None + }; if block_opt.is_none() && result.is_none() { continue; @@ -945,7 +951,7 @@ impl ApiTester { pub async fn test_beacon_blocks(self) -> Self { for block_id in self.interesting_block_ids() { - let expected = self.get_block(block_id); + let expected = self.get_block(block_id).await; if let BlockId::Slot(slot) = block_id { if expected.is_none() { @@ -1030,6 +1036,7 @@ impl ApiTester { let expected = self .get_block(block_id) + .await .map(|block| block.message().body().attestations().clone().into()); if let BlockId::Slot(slot) = block_id { @@ -1902,7 +1909,7 @@ impl ApiTester { let block = self .client - .get_validator_blocks::(slot, &randao_reveal, 
None) + .get_validator_blocks::>(slot, &randao_reveal, None) .await .unwrap() .data; @@ -1925,7 +1932,12 @@ impl ApiTester { let block = self .client - .get_validator_blocks_with_verify_randao::(slot, None, None, Some(false)) + .get_validator_blocks_with_verify_randao::>( + slot, + None, + None, + Some(false), + ) .await .unwrap() .data; @@ -1976,13 +1988,13 @@ impl ApiTester { // Check failure with no `verify_randao` passed. self.client - .get_validator_blocks::(slot, &bad_randao_reveal, None) + .get_validator_blocks::>(slot, &bad_randao_reveal, None) .await .unwrap_err(); // Check failure with `verify_randao=true`. self.client - .get_validator_blocks_with_verify_randao::( + .get_validator_blocks_with_verify_randao::>( slot, Some(&bad_randao_reveal), None, @@ -1993,14 +2005,16 @@ impl ApiTester { // Check failure with no randao reveal provided. self.client - .get_validator_blocks_with_verify_randao::(slot, None, None, None) + .get_validator_blocks_with_verify_randao::>( + slot, None, None, None, + ) .await .unwrap_err(); // Check success with `verify_randao=false`. 
let block = self .client - .get_validator_blocks_with_verify_randao::( + .get_validator_blocks_with_verify_randao::>( slot, Some(&bad_randao_reveal), None, @@ -2378,8 +2392,7 @@ impl ApiTester { .unwrap(); let attesting_validators: Vec = committees .into_iter() - .map(|committee| committee.committee.iter().cloned()) - .flatten() + .flat_map(|committee| committee.committee.iter().cloned()) .collect(); // All attesters should now be considered live let expected = expected diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 6f746705d6..5ed3614de6 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -26,18 +26,18 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } smallvec = "1.6.1" tokio-io-timeout = "1.1.1" lru = "0.7.1" -parking_lot = "0.11.0" +parking_lot = "0.12.0" sha2 = "0.9.1" snap = "1.0.1" hex = "0.4.2" tokio-util = { version = "0.6.2", features = ["codec", "compat", "time"] } tiny-keccak = "2.0.2" task_executor = { path = "../../common/task_executor" } -rand = "0.7.3" +rand = "0.8.5" directory = { path = "../../common/directory" } regex = "1.5.5" -strum = { version = "0.21.0", features = ["derive"] } -superstruct = "0.4.0" +strum = { version = "0.24.0", features = ["derive"] } +superstruct = "0.5.0" prometheus-client = "0.15.0" unused_port = { path = "../../common/unused_port" } diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index b5b0049cbd..e67bb29de3 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -991,7 +991,7 @@ where debug!( self.log, "Ignoring rpc message of disconnecting peer"; - "peer" => %peer_id + event ); return; } diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 1d542a7f39..b513ede59f 100644 --- 
a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -210,10 +210,8 @@ pub fn load_enr_from_disk(dir: &Path) -> Result { let mut enr_string = String::new(); match enr_file.read_to_string(&mut enr_string) { Err(_) => Err("Could not read ENR from file".to_string()), - Ok(_) => match Enr::from_str(&enr_string) { - Ok(disk_enr) => Ok(disk_enr), - Err(e) => Err(format!("ENR from file could not be decoded: {:?}", e)), - }, + Ok(_) => Enr::from_str(&enr_string) + .map_err(|e| format!("ENR from file could not be decoded: {:?}", e)), } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 437d05d474..85c0ddd950 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -213,6 +213,8 @@ impl PeerManager { ScoreUpdateResult::Disconnect => { // The peer has transitioned to a disconnect state and has been marked as such in // the peer db. We must inform libp2p to disconnect this peer. + self.inbound_ping_peers.remove(peer_id); + self.outbound_ping_peers.remove(peer_id); self.events.push(PeerManagerEvent::DisconnectPeer( *peer_id, GoodbyeReason::BadScore, @@ -388,7 +390,7 @@ impl PeerManager { /// Updates `PeerInfo` with `identify` information. 
pub fn identify(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { - let previous_kind = peer_info.client().kind.clone(); + let previous_kind = peer_info.client().kind; let previous_listening_addresses = peer_info.set_listening_addresses(info.listen_addrs.clone()); peer_info.set_client(peerdb::client::Client::from_identify_info(info)); @@ -412,12 +414,9 @@ impl PeerManager { ) { metrics::inc_gauge_vec( &metrics::PEERS_PER_CLIENT, - &[&peer_info.client().kind.to_string()], - ); - metrics::dec_gauge_vec( - &metrics::PEERS_PER_CLIENT, - &[&previous_kind.to_string()], + &[peer_info.client().kind.as_ref()], ); + metrics::dec_gauge_vec(&metrics::PEERS_PER_CLIENT, &[previous_kind.as_ref()]); } } } else { @@ -462,7 +461,7 @@ impl PeerManager { // Our fault. Do nothing return; } - RPCError::InvalidData => { + RPCError::InvalidData(_) => { // Peer is not complying with the protocol. This is considered a malicious action PeerAction::Fatal } @@ -674,7 +673,7 @@ impl PeerManager { let value = clients_per_peer.get(&client_kind.to_string()).unwrap_or(&0); metrics::set_gauge_vec( &metrics::PEERS_PER_CLIENT, - &[&client_kind.to_string()], + &[client_kind.as_ref()], *value as i64, ); } @@ -842,21 +841,14 @@ impl PeerManager { let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers(); let wanted_peers = if peer_count < self.target_peers.saturating_sub(dialing_peers) { // We need more peers in general. - // The maximum discovery query is for 16 peers, but we can search for less if - // needed. - std::cmp::min( - self.target_peers.saturating_sub(dialing_peers) - peer_count, - 16, - ) + // Note: The maximum discovery query is bounded by `Discovery`. 
+ self.target_peers.saturating_sub(dialing_peers) - peer_count } else if outbound_only_peer_count < self.min_outbound_only_peers() && peer_count < self.max_outbound_dialing_peers() { - std::cmp::min( - self.max_outbound_dialing_peers() - .saturating_sub(dialing_peers) - - peer_count, - 16, - ) + self.max_outbound_dialing_peers() + .saturating_sub(dialing_peers) + - peer_count } else { 0 }; diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs index 7cc84516a0..f15f38daa6 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/client.rs @@ -4,7 +4,7 @@ use libp2p::identify::IdentifyInfo; use serde::Serialize; -use strum::{AsRefStr, AsStaticStr, EnumIter}; +use strum::{AsRefStr, EnumIter, IntoStaticStr}; /// Various client and protocol information related to a node. #[derive(Clone, Debug, Serialize)] @@ -21,7 +21,7 @@ pub struct Client { pub agent_string: Option, } -#[derive(Clone, Debug, Serialize, PartialEq, AsRefStr, AsStaticStr, EnumIter)] +#[derive(Clone, Copy, Debug, Serialize, PartialEq, AsRefStr, IntoStaticStr, EnumIter)] pub enum ClientKind { /// A lighthouse node (the best kind). 
Lighthouse, diff --git a/beacon_node/lighthouse_network/src/rpc/codec/base.rs b/beacon_node/lighthouse_network/src/rpc/codec/base.rs index eca0578785..53f85d9a7b 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/base.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/base.rs @@ -184,13 +184,25 @@ mod tests { use crate::rpc::protocol::*; use std::sync::Arc; - use types::{ForkContext, Hash256}; + use types::{Epoch, ForkContext, ForkName, Hash256, Slot}; use unsigned_varint::codec::Uvi; type Spec = types::MainnetEthSpec; - fn fork_context() -> ForkContext { - ForkContext::new::(types::Slot::new(0), Hash256::zero(), &Spec::default_spec()) + fn fork_context(fork_name: ForkName) -> ForkContext { + let mut chain_spec = Spec::default_spec(); + let altair_fork_epoch = Epoch::new(1); + let merge_fork_epoch = Epoch::new(2); + + chain_spec.altair_fork_epoch = Some(altair_fork_epoch); + chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + + let current_slot = match fork_name { + ForkName::Base => Slot::new(0), + ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + }; + ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } #[test] @@ -202,9 +214,12 @@ mod tests { let snappy_protocol_id = ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context()); - let mut snappy_outbound_codec = - SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576, fork_context); + let fork_context = Arc::new(fork_context(ForkName::Base)); + let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( + snappy_protocol_id, + max_rpc_size(&fork_context), + fork_context, + ); // remove response code let mut snappy_buf = buf.clone(); @@ -234,9 +249,12 @@ mod tests { let snappy_protocol_id = ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context()); - let mut 
snappy_outbound_codec = - SSZSnappyOutboundCodec::::new(snappy_protocol_id, 1_048_576, fork_context); + let fork_context = Arc::new(fork_context(ForkName::Base)); + let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( + snappy_protocol_id, + max_rpc_size(&fork_context), + fork_context, + ); let snappy_decoded_message = snappy_outbound_codec.decode(&mut dst).unwrap_err(); @@ -260,36 +278,50 @@ mod tests { ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy); // Response limits - let limit = protocol_id.rpc_response_limits::(); + let fork_context = Arc::new(fork_context(ForkName::Base)); + let max_rpc_size = max_rpc_size(&fork_context); + let limit = protocol_id.rpc_response_limits::(&fork_context); let mut max = encode_len(limit.max + 1); - let fork_context = Arc::new(fork_context()); let mut codec = SSZSnappyOutboundCodec::::new( protocol_id.clone(), - 1_048_576, + max_rpc_size, fork_context.clone(), ); - assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData); + assert!(matches!( + codec.decode(&mut max).unwrap_err(), + RPCError::InvalidData(_) + )); let mut min = encode_len(limit.min - 1); let mut codec = SSZSnappyOutboundCodec::::new( protocol_id.clone(), - 1_048_576, + max_rpc_size, fork_context.clone(), ); - assert_eq!(codec.decode(&mut min).unwrap_err(), RPCError::InvalidData); + assert!(matches!( + codec.decode(&mut min).unwrap_err(), + RPCError::InvalidData(_) + )); // Request limits let limit = protocol_id.rpc_request_limits(); let mut max = encode_len(limit.max + 1); let mut codec = SSZSnappyOutboundCodec::::new( protocol_id.clone(), - 1_048_576, + max_rpc_size, fork_context.clone(), ); - assert_eq!(codec.decode(&mut max).unwrap_err(), RPCError::InvalidData); + assert!(matches!( + codec.decode(&mut max).unwrap_err(), + RPCError::InvalidData(_) + )); let mut min = encode_len(limit.min - 1); - let mut codec = SSZSnappyOutboundCodec::::new(protocol_id, 1_048_576, fork_context); - assert_eq!(codec.decode(&mut 
min).unwrap_err(), RPCError::InvalidData); + let mut codec = + SSZSnappyOutboundCodec::::new(protocol_id, max_rpc_size, fork_context); + assert!(matches!( + codec.decode(&mut min).unwrap_err(), + RPCError::InvalidData(_) + )); } } diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 0924dca0c0..6bd4a96fb5 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -146,7 +146,10 @@ impl Decoder for SSZSnappyInboundCodec { // packet size for ssz container corresponding to `self.protocol`. let ssz_limits = self.protocol.rpc_request_limits(); if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { - return Err(RPCError::InvalidData); + return Err(RPCError::InvalidData(format!( + "RPC request length is out of bounds, length {}", + length + ))); } // Calculate worst case compression length for given uncompressed length let max_compressed_len = snap::raw::max_compress_len(length) as u64; @@ -279,9 +282,14 @@ impl Decoder for SSZSnappyOutboundCodec { // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of // packet size for ssz container corresponding to `self.protocol`. 
- let ssz_limits = self.protocol.rpc_response_limits::(); + let ssz_limits = self + .protocol + .rpc_response_limits::(&self.fork_context); if ssz_limits.is_out_of_bounds(length, self.max_packet_size) { - return Err(RPCError::InvalidData); + return Err(RPCError::InvalidData(format!( + "RPC response length is out of bounds, length {}", + length + ))); } // Calculate worst case compression length for given uncompressed length let max_compressed_len = snap::raw::max_compress_len(length) as u64; @@ -327,7 +335,10 @@ impl OutboundCodec> for SSZSnappyOutbound // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of // packet size for ssz container corresponding to `ErrorType`. if length > self.max_packet_size || length > *ERROR_TYPE_MAX || length < *ERROR_TYPE_MIN { - return Err(RPCError::InvalidData); + return Err(RPCError::InvalidData(format!( + "RPC Error length is out of bounds, length {}", + length + ))); } // Calculate worst case compression length for given uncompressed length @@ -364,7 +375,10 @@ fn handle_error( // If snappy has read `max_compressed_len` from underlying stream and still can't fill buffer, we have a malicious message. // Report as `InvalidData` so that malicious peer gets banned. if num_bytes >= max_compressed_len { - Err(RPCError::InvalidData) + Err(RPCError::InvalidData(format!( + "Received malicious snappy message, num_bytes {}, max_compressed_len {}", + num_bytes, max_compressed_len + ))) } else { // Haven't received enough bytes to decode yet, wait for more Ok(None) @@ -460,7 +474,9 @@ fn handle_v1_request( // Handle this case just for completeness. Protocol::MetaData => { if !decoded_buffer.is_empty() { - Err(RPCError::InvalidData) + Err(RPCError::InternalError( + "Metadata requests shouldn't reach decoder", + )) } else { Ok(Some(InboundRequest::MetaData(PhantomData))) } @@ -486,7 +502,7 @@ fn handle_v2_request( // Handle this case just for completeness. 
Protocol::MetaData => { if !decoded_buffer.is_empty() { - Err(RPCError::InvalidData) + Err(RPCError::InvalidData("Metadata request".to_string())) } else { Ok(Some(InboundRequest::MetaData(PhantomData))) } @@ -510,7 +526,9 @@ fn handle_v1_response( decoded_buffer, )?))), // This case should be unreachable as `Goodbye` has no response. - Protocol::Goodbye => Err(RPCError::InvalidData), + Protocol::Goodbye => Err(RPCError::InvalidData( + "Goodbye RPC message has no valid response".to_string(), + )), Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Box::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), @@ -615,8 +633,8 @@ mod tests { }; use std::sync::Arc; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, Epoch, ForkContext, Hash256, Signature, - SignedBeaconBlock, Slot, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, ForkContext, + FullPayload, Hash256, Signature, SignedBeaconBlock, Slot, }; use snap::write::FrameEncoder; @@ -625,17 +643,27 @@ mod tests { type Spec = types::MainnetEthSpec; - fn fork_context() -> ForkContext { + fn fork_context(fork_name: ForkName) -> ForkContext { let mut chain_spec = Spec::default_spec(); - // Set fork_epoch to `Some` to ensure that the `ForkContext` object - // includes altair in the list of forks - chain_spec.altair_fork_epoch = Some(types::Epoch::new(42)); - ForkContext::new::(types::Slot::new(0), Hash256::zero(), &chain_spec) + let altair_fork_epoch = Epoch::new(1); + let merge_fork_epoch = Epoch::new(2); + + chain_spec.altair_fork_epoch = Some(altair_fork_epoch); + chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + + let current_slot = match fork_name { + ForkName::Base => Slot::new(0), + ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Merge => merge_fork_epoch.start_slot(Spec::slots_per_epoch()), + }; + ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } - fn base_block() 
-> SignedBeaconBlock { - let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&Spec::default_spec())); - SignedBeaconBlock::from_block(full_block, Signature::empty()) + /// Smallest sized block across all current forks. Useful for testing + /// min length check conditions. + fn empty_base_block() -> SignedBeaconBlock { + let empty_block = BeaconBlock::Base(BeaconBlockBase::::empty(&Spec::default_spec())); + SignedBeaconBlock::from_block(empty_block, Signature::empty()) } fn altair_block() -> SignedBeaconBlock { @@ -644,6 +672,36 @@ mod tests { SignedBeaconBlock::from_block(full_block, Signature::empty()) } + /// Merge block with length < max_rpc_size. + fn merge_block_small(fork_context: &ForkContext) -> SignedBeaconBlock { + let mut block: BeaconBlockMerge<_, FullPayload> = + BeaconBlockMerge::empty(&Spec::default_spec()); + let tx = VariableList::from(vec![0; 1024]); + let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); + + block.body.execution_payload.execution_payload.transactions = txs; + + let block = BeaconBlock::Merge(block); + assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context)); + SignedBeaconBlock::from_block(block, Signature::empty()) + } + + /// Merge block with length > MAX_RPC_SIZE. + /// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory. + /// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. 
+ fn merge_block_large(fork_context: &ForkContext) -> SignedBeaconBlock { + let mut block: BeaconBlockMerge<_, FullPayload> = + BeaconBlockMerge::empty(&Spec::default_spec()); + let tx = VariableList::from(vec![0; 1024]); + let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); + + block.body.execution_payload.execution_payload.transactions = txs; + + let block = BeaconBlock::Merge(block); + assert!(block.ssz_bytes_len() > max_rpc_size(fork_context)); + SignedBeaconBlock::from_block(block, Signature::empty()) + } + fn status_message() -> StatusMessage { StatusMessage { fork_digest: [0; 4], @@ -678,10 +736,11 @@ mod tests { protocol: Protocol, version: Version, message: RPCCodedResponse, + fork_name: ForkName, ) -> Result { - let max_packet_size = 1_048_576; let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context()); + let fork_context = Arc::new(fork_context(fork_name)); + let max_packet_size = max_rpc_size(&fork_context); let mut buf = BytesMut::new(); let mut snappy_inbound_codec = @@ -691,14 +750,43 @@ mod tests { Ok(buf) } + fn encode_without_length_checks( + bytes: Vec, + fork_name: ForkName, + ) -> Result { + let fork_context = fork_context(fork_name); + let mut dst = BytesMut::new(); + + // Add context bytes if required + dst.extend_from_slice(&fork_context.to_context_bytes(fork_name).unwrap()); + + let mut uvi_codec: Uvi = Uvi::default(); + + // Inserts the length prefix of the uncompressed bytes into dst + // encoded as a unsigned varint + uvi_codec + .encode(bytes.len(), &mut dst) + .map_err(RPCError::from)?; + + let mut writer = FrameEncoder::new(Vec::new()); + writer.write_all(&bytes).map_err(RPCError::from)?; + writer.flush().map_err(RPCError::from)?; + + // Write compressed bytes to `dst` + dst.extend_from_slice(writer.get_ref()); + + Ok(dst) + } + /// Attempts to decode the given protocol bytes as an rpc response fn decode( protocol: Protocol, version: 
Version, message: &mut BytesMut, + fork_name: ForkName, ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, version, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context()); + let fork_context = Arc::new(fork_context(fork_name)); let max_packet_size = max_rpc_size(&fork_context); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); @@ -711,9 +799,10 @@ mod tests { protocol: Protocol, version: Version, message: RPCCodedResponse, + fork_name: ForkName, ) -> Result>, RPCError> { - let mut encoded = encode(protocol, version.clone(), message)?; - decode(protocol, version, &mut encoded) + let mut encoded = encode(protocol, version.clone(), message, fork_name)?; + decode(protocol, version, &mut encoded, fork_name) } // Test RPCResponse encoding/decoding for V1 messages @@ -723,7 +812,8 @@ mod tests { encode_then_decode( Protocol::Status, Version::V1, - RPCCodedResponse::Success(RPCResponse::Status(status_message())) + RPCCodedResponse::Success(RPCResponse::Status(status_message())), + ForkName::Base, ), Ok(Some(RPCResponse::Status(status_message()))) ); @@ -732,7 +822,8 @@ mod tests { encode_then_decode( Protocol::Ping, Version::V1, - RPCCodedResponse::Success(RPCResponse::Pong(ping_message())) + RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), + ForkName::Base, ), Ok(Some(RPCResponse::Pong(ping_message()))) ); @@ -741,9 +832,12 @@ mod tests { encode_then_decode( Protocol::BlocksByRange, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block()))) + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new(base_block())))) + Ok(Some(RPCResponse::BlocksByRange(Box::new( + empty_base_block() + )))) ); assert!( @@ -752,6 +846,7 @@ mod tests { Protocol::BlocksByRange, Version::V1, 
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + ForkName::Altair, ) .unwrap_err(), RPCError::SSZDecodeError(_) @@ -763,9 +858,12 @@ mod tests { encode_then_decode( Protocol::BlocksByRoot, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))) + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRoot(Box::new(base_block())))) + Ok(Some(RPCResponse::BlocksByRoot( + Box::new(empty_base_block()) + ))) ); assert!( @@ -774,6 +872,7 @@ mod tests { Protocol::BlocksByRoot, Version::V1, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + ForkName::Altair, ) .unwrap_err(), RPCError::SSZDecodeError(_) @@ -786,6 +885,7 @@ mod tests { Protocol::MetaData, Version::V1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + ForkName::Base, ), Ok(Some(RPCResponse::MetaData(metadata()))), ); @@ -795,6 +895,7 @@ mod tests { Protocol::MetaData, Version::V1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + ForkName::Base, ), Ok(Some(RPCResponse::MetaData(metadata()))), ); @@ -805,6 +906,7 @@ mod tests { Protocol::MetaData, Version::V1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), + ForkName::Base, ), Ok(Some(RPCResponse::MetaData(metadata()))), ); @@ -819,6 +921,7 @@ mod tests { Protocol::Status, Version::V2, RPCCodedResponse::Success(RPCResponse::Status(status_message())), + ForkName::Base, ) .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), @@ -832,6 +935,7 @@ mod tests { Protocol::Ping, Version::V2, RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), + ForkName::Base, ) .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), @@ -843,44 +947,148 @@ mod tests { encode_then_decode( Protocol::BlocksByRange, Version::V2, - 
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block()))) + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new(base_block())))) + Ok(Some(RPCResponse::BlocksByRange(Box::new( + empty_base_block() + )))) + ); + + // Decode the smallest possible base block when current fork is altair + // This is useful for checking that we allow for blocks smaller than + // the current_fork's rpc limit + assert_eq!( + encode_then_decode( + Protocol::BlocksByRange, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + ForkName::Altair, + ), + Ok(Some(RPCResponse::BlocksByRange(Box::new( + empty_base_block() + )))) ); assert_eq!( encode_then_decode( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))) + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + ForkName::Altair, ), Ok(Some(RPCResponse::BlocksByRange(Box::new(altair_block())))) ); + let merge_block_small = merge_block_small(&fork_context(ForkName::Merge)); + let merge_block_large = merge_block_large(&fork_context(ForkName::Merge)); + assert_eq!( encode_then_decode( - Protocol::BlocksByRoot, + Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))) + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new( + merge_block_small.clone() + ))), + ForkName::Merge, ), - Ok(Some(RPCResponse::BlocksByRoot(Box::new(base_block())))) + Ok(Some(RPCResponse::BlocksByRange(Box::new( + merge_block_small.clone() + )))) + ); + + let mut encoded = + encode_without_length_checks(merge_block_large.as_ssz_bytes(), ForkName::Merge) + .unwrap(); + + assert!( + matches!( + decode( + Protocol::BlocksByRange, + Version::V2, + &mut encoded, + ForkName::Merge, + ) + .unwrap_err(), + RPCError::InvalidData(_) + ), + "Decoding 
a block larger than max_rpc_size should fail" ); assert_eq!( encode_then_decode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))) + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + ForkName::Base, + ), + Ok(Some(RPCResponse::BlocksByRoot( + Box::new(empty_base_block()) + ))), + ); + + // Decode the smallest possible base block when current fork is altair + // This is useful for checking that we allow for blocks smaller than + // the current_fork's rpc limit + assert_eq!( + encode_then_decode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + ForkName::Altair, + ), + Ok(Some(RPCResponse::BlocksByRoot( + Box::new(empty_base_block()) + ))) + ); + + assert_eq!( + encode_then_decode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + ForkName::Altair, ), Ok(Some(RPCResponse::BlocksByRoot(Box::new(altair_block())))) ); + assert_eq!( + encode_then_decode( + Protocol::BlocksByRoot, + Version::V2, + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new( + merge_block_small.clone() + ))), + ForkName::Merge, + ), + Ok(Some(RPCResponse::BlocksByRoot(Box::new(merge_block_small)))) + ); + + let mut encoded = + encode_without_length_checks(merge_block_large.as_ssz_bytes(), ForkName::Merge) + .unwrap(); + + assert!( + matches!( + decode( + Protocol::BlocksByRoot, + Version::V2, + &mut encoded, + ForkName::Merge, + ) + .unwrap_err(), + RPCError::InvalidData(_) + ), + "Decoding a block larger than max_rpc_size should fail" + ); + // A MetaDataV1 still encodes as a MetaDataV2 since version is Version::V2 assert_eq!( encode_then_decode( Protocol::MetaData, Version::V2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata())) + RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + ForkName::Base, ), 
Ok(Some(RPCResponse::MetaData(metadata_v2()))) ); @@ -889,7 +1097,8 @@ mod tests { encode_then_decode( Protocol::MetaData, Version::V2, - RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())) + RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), + ForkName::Altair, ), Ok(Some(RPCResponse::MetaData(metadata_v2()))) ); @@ -898,34 +1107,48 @@ mod tests { // Test RPCResponse encoding/decoding for V2 messages #[test] fn test_context_bytes_v2() { - let fork_context = fork_context(); + let fork_context = fork_context(ForkName::Altair); // Removing context bytes for v2 messages should error let mut encoded_bytes = encode( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + ForkName::Base, ) .unwrap(); let _ = encoded_bytes.split_to(4); assert!(matches!( - decode(Protocol::BlocksByRange, Version::V2, &mut encoded_bytes).unwrap_err(), + decode( + Protocol::BlocksByRange, + Version::V2, + &mut encoded_bytes, + ForkName::Base + ) + .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), )); let mut encoded_bytes = encode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + ForkName::Base, ) .unwrap(); let _ = encoded_bytes.split_to(4); assert!(matches!( - decode(Protocol::BlocksByRange, Version::V2, &mut encoded_bytes).unwrap_err(), + decode( + Protocol::BlocksByRange, + Version::V2, + &mut encoded_bytes, + ForkName::Base + ) + .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), )); @@ -933,7 +1156,8 @@ mod tests { let mut encoded_bytes = encode( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(base_block()))), + 
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + ForkName::Altair, ) .unwrap(); @@ -943,7 +1167,13 @@ mod tests { wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( - decode(Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes).unwrap_err(), + decode( + Protocol::BlocksByRange, + Version::V2, + &mut wrong_fork_bytes, + ForkName::Altair + ) + .unwrap_err(), RPCError::SSZDecodeError(_), )); @@ -952,6 +1182,7 @@ mod tests { Protocol::BlocksByRoot, Version::V2, RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + ForkName::Altair, ) .unwrap(); @@ -960,7 +1191,13 @@ mod tests { wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( - decode(Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes).unwrap_err(), + decode( + Protocol::BlocksByRange, + Version::V2, + &mut wrong_fork_bytes, + ForkName::Altair + ) + .unwrap_err(), RPCError::SSZDecodeError(_), )); @@ -972,17 +1209,25 @@ mod tests { Protocol::MetaData, Version::V2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), + ForkName::Altair, ) .unwrap(), ); - assert!(decode(Protocol::MetaData, Version::V2, &mut encoded_bytes).is_err()); + assert!(decode( + Protocol::MetaData, + Version::V2, + &mut encoded_bytes, + ForkName::Altair + ) + .is_err()); // Sending context bytes which do not correspond to any fork should return an error let mut encoded_bytes = encode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + ForkName::Altair, ) .unwrap(); @@ -991,7 +1236,13 @@ mod tests { wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( - decode(Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes).unwrap_err(), + decode( + Protocol::BlocksByRange, + Version::V2, + &mut wrong_fork_bytes, + 
ForkName::Altair + ) + .unwrap_err(), RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), )); @@ -999,14 +1250,20 @@ mod tests { let mut encoded_bytes = encode( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + ForkName::Altair, ) .unwrap(); let mut part = encoded_bytes.split_to(3); assert_eq!( - decode(Protocol::BlocksByRange, Version::V2, &mut part), + decode( + Protocol::BlocksByRange, + Version::V2, + &mut part, + ForkName::Altair + ), Ok(None) ) } @@ -1061,17 +1318,17 @@ mod tests { dst.extend_from_slice(writer.get_ref()); // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. - assert_eq!( - decode(Protocol::Status, Version::V1, &mut dst).unwrap_err(), - RPCError::InvalidData - ); + assert!(matches!( + decode(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), + RPCError::InvalidData(_) + )); } /// Test a malicious snappy encoding for a V2 `BlocksByRange` message where the attacker /// sends a valid message filled with a stream of useless padding before the actual message. #[test] fn test_decode_malicious_v2_message() { - let fork_context = Arc::new(fork_context()); + let fork_context = Arc::new(fork_context(ForkName::Altair)); // 10 byte snappy stream identifier let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY"; @@ -1118,10 +1375,16 @@ mod tests { dst.extend_from_slice(writer.get_ref()); // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
- assert_eq!( - decode(Protocol::BlocksByRange, Version::V2, &mut dst).unwrap_err(), - RPCError::InvalidData - ); + assert!(matches!( + decode( + Protocol::BlocksByRange, + Version::V2, + &mut dst, + ForkName::Altair + ) + .unwrap_err(), + RPCError::InvalidData(_) + )); } /// Test sending a message with encoded length prefix > max_rpc_size. @@ -1157,9 +1420,9 @@ mod tests { writer.flush().unwrap(); dst.extend_from_slice(writer.get_ref()); - assert_eq!( - decode(Protocol::Status, Version::V1, &mut dst).unwrap_err(), - RPCError::InvalidData - ); + assert!(matches!( + decode(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), + RPCError::InvalidData(_) + )); } } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 2b9e7c4902..ac39e0cecc 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -137,7 +137,7 @@ enum HandlerState { /// /// While in this state the handler rejects new requests but tries to finish existing ones. /// Once the timer expires, all messages are killed. - ShuttingDown(Box), + ShuttingDown(Pin>), /// The handler is deactivated. A goodbye has been sent and no more messages are sent or /// received. 
Deactivated, @@ -252,7 +252,7 @@ where self.dial_queue.push((id, OutboundRequest::Goodbye(reason))); } - self.state = HandlerState::ShuttingDown(Box::new(sleep_until( + self.state = HandlerState::ShuttingDown(Box::pin(sleep_until( TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64), ))); } @@ -477,7 +477,7 @@ where ProtocolError::InvalidMessage | ProtocolError::TooManyProtocols => { // Peer is sending invalid data during the negotiation phase, not // participating in the protocol - RPCError::InvalidData + RPCError::InvalidData("Invalid message during negotiation".to_string()) } }, }; @@ -539,14 +539,15 @@ where } // Check if we are shutting down, and if the timer ran out - if let HandlerState::ShuttingDown(delay) = &self.state { - if delay.is_elapsed() { - self.state = HandlerState::Deactivated; - debug!(self.log, "Handler deactivated"); - return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::InternalError( - "Shutdown timeout", - ))); - } + if let HandlerState::ShuttingDown(delay) = &mut self.state { + match delay.as_mut().poll(cx) { + Poll::Ready(_) => { + self.state = HandlerState::Deactivated; + debug!(self.log, "Handler deactivated"); + return Poll::Ready(ConnectionHandlerEvent::Close(RPCError::Disconnected)); + } + Poll::Pending => {} + }; } // purge expired inbound substreams and send an error diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 087f8e5336..1ac9c9b2c0 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -9,7 +9,7 @@ use ssz_types::{ VariableList, }; use std::ops::Deref; -use strum::AsStaticStr; +use strum::IntoStaticStr; use superstruct::superstruct; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -263,7 +263,7 @@ pub enum RPCCodedResponse { } /// The code assigned to an erroneous `RPCResponse`. 
-#[derive(Debug, Clone, Copy, PartialEq, AsStaticStr)] +#[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] pub enum RPCResponseErrorCode { RateLimited, @@ -335,6 +335,19 @@ impl RPCResponseErrorCode { } } +use super::Protocol; +impl RPCResponse { + pub fn protocol(&self) -> Protocol { + match self { + RPCResponse::Status(_) => Protocol::Status, + RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange, + RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, + RPCResponse::Pong(_) => Protocol::Ping, + RPCResponse::MetaData(_) => Protocol::MetaData, + } + } +} + impl std::fmt::Display for RPCResponseErrorCode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 884acd9bcf..0bedd423b2 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -271,3 +271,38 @@ where Poll::Pending } } + +impl slog::KV for RPCMessage +where + TSpec: EthSpec, + Id: ReqId, +{ + fn serialize( + &self, + _record: &slog::Record, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + serializer.emit_arguments("peer_id", &format_args!("{}", self.peer_id))?; + let (msg_kind, protocol) = match &self.event { + Ok(received) => match received { + RPCReceived::Request(_, req) => ("request", req.protocol()), + RPCReceived::Response(_, res) => ("response", res.protocol()), + RPCReceived::EndOfStream(_, end) => ( + "end_of_stream", + match end { + ResponseTermination::BlocksByRange => Protocol::BlocksByRange, + ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, + }, + ), + }, + Err(error) => match &error { + HandlerErr::Inbound { proto, .. } => ("inbound_err", *proto), + HandlerErr::Outbound { proto, .. 
} => ("outbound_err", *proto), + }, + }; + serializer.emit_str("msg_kind", msg_kind)?; + serializer.emit_arguments("protocol", &format_args!("{}", protocol))?; + + slog::Result::Ok(()) + } +} diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 1e65041991..1639d17941 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -14,7 +14,7 @@ use std::io; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; -use strum::{AsStaticRef, AsStaticStr}; +use strum::IntoStaticStr; use tokio_io_timeout::TimeoutStream; use tokio_util::{ codec::Framed, @@ -63,7 +63,13 @@ lazy_static! { /// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. - pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = types::ExecutionPayload::::max_execution_payload_size(); + /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network + /// with `MAX_RPC_SIZE_POST_MERGE`. + pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize = + // Size of a full altair block + *SIGNED_BEACON_BLOCK_ALTAIR_MAX + + types::ExecutionPayload::::max_execution_payload_size() // adding max size of execution payload (~16gb) + + ssz::BYTES_PER_LENGTH_OFFSET; // Adding the additional ssz offset for the `ExecutionPayload` field pub static ref BLOCKS_BY_ROOT_REQUEST_MIN: usize = VariableList::::from(Vec::::new()) @@ -106,10 +112,29 @@ const REQUEST_TIMEOUT: u64 = 15; /// Returns the maximum bytes that can be sent across the RPC. 
pub fn max_rpc_size(fork_context: &ForkContext) -> usize { - if fork_context.fork_exists(ForkName::Merge) { - MAX_RPC_SIZE_POST_MERGE - } else { - MAX_RPC_SIZE + match fork_context.current_fork() { + ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, + ForkName::Altair | ForkName::Base => MAX_RPC_SIZE, + } +} + +/// Returns the rpc limits for beacon_block_by_range and beacon_block_by_root responses. +/// +/// Note: This function should take care to return the min/max limits accounting for all +/// previous valid forks when adding a new fork variant. +pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { + match ¤t_fork { + ForkName::Base => { + RpcLimits::new(*SIGNED_BEACON_BLOCK_BASE_MIN, *SIGNED_BEACON_BLOCK_BASE_MAX) + } + ForkName::Altair => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair blocks + *SIGNED_BEACON_BLOCK_ALTAIR_MAX, // Altair block is larger than base blocks + ), + ForkName::Merge => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and merge blocks + *SIGNED_BEACON_BLOCK_MERGE_MAX, // Merge block is larger than base and altair blocks + ), } } @@ -269,39 +294,15 @@ impl ProtocolId { } /// Returns min and max size for messages of given protocol id responses. 
- pub fn rpc_response_limits(&self) -> RpcLimits { + pub fn rpc_response_limits(&self, fork_context: &ForkContext) -> RpcLimits { match self.message_name { Protocol::Status => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), ), Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response - Protocol::BlocksByRange => RpcLimits::new( - std::cmp::min( - std::cmp::min( - *SIGNED_BEACON_BLOCK_ALTAIR_MIN, - *SIGNED_BEACON_BLOCK_BASE_MIN, - ), - *SIGNED_BEACON_BLOCK_MERGE_MIN, - ), - std::cmp::max( - std::cmp::max( - *SIGNED_BEACON_BLOCK_ALTAIR_MAX, - *SIGNED_BEACON_BLOCK_BASE_MAX, - ), - *SIGNED_BEACON_BLOCK_MERGE_MAX, - ), - ), - Protocol::BlocksByRoot => RpcLimits::new( - std::cmp::min( - *SIGNED_BEACON_BLOCK_ALTAIR_MIN, - *SIGNED_BEACON_BLOCK_BASE_MIN, - ), - std::cmp::max( - *SIGNED_BEACON_BLOCK_ALTAIR_MAX, - *SIGNED_BEACON_BLOCK_BASE_MAX, - ), - ), + Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()), + Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), @@ -510,7 +511,7 @@ impl InboundRequest { } /// Error in RPC Encoding/Decoding. -#[derive(Debug, Clone, PartialEq, AsStaticStr)] +#[derive(Debug, Clone, PartialEq, IntoStaticStr)] #[strum(serialize_all = "snake_case")] pub enum RPCError { /// Error when decoding the raw buffer from ssz. @@ -528,7 +529,7 @@ pub enum RPCError { /// Stream ended unexpectedly. IncompleteStream, /// Peer sent invalid data. - InvalidData, + InvalidData(String), /// An error occurred due to internal reasons. Ex: timer failure. InternalError(&'static str), /// Negotiation with this peer timed out. 
@@ -562,7 +563,7 @@ impl std::fmt::Display for RPCError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { RPCError::SSZDecodeError(ref err) => write!(f, "Error while decoding ssz: {:?}", err), - RPCError::InvalidData => write!(f, "Peer sent unexpected data"), + RPCError::InvalidData(ref err) => write!(f, "Peer sent unexpected data: {}", err), RPCError::IoError(ref err) => write!(f, "IO Error: {}", err), RPCError::ErrorResponse(ref code, ref reason) => write!( f, @@ -589,7 +590,7 @@ impl std::error::Error for RPCError { RPCError::StreamTimeout => None, RPCError::UnsupportedProtocol => None, RPCError::IncompleteStream => None, - RPCError::InvalidData => None, + RPCError::InvalidData(_) => None, RPCError::InternalError(_) => None, RPCError::ErrorResponse(_, _) => None, RPCError::NegotiationTimeout => None, @@ -617,8 +618,8 @@ impl RPCError { /// Used for metrics. pub fn as_static_str(&self) -> &'static str { match self { - RPCError::ErrorResponse(ref code, ..) => code.as_static(), - e => e.as_static(), + RPCError::ErrorResponse(ref code, ..) 
=> code.into(), + e => e.into(), } } } diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index e79fdf464d..ea770de6c2 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -10,7 +10,9 @@ use std::sync::Arc; use std::sync::Weak; use std::time::Duration; use tokio::runtime::Runtime; -use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, Hash256, MinimalEthSpec}; +use types::{ + ChainSpec, EnrForkId, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Slot, +}; use unused_port::unused_tcp_port; #[allow(clippy::type_complexity)] @@ -26,13 +28,20 @@ type ReqId = usize; use tempfile::Builder as TempBuilder; /// Returns a dummy fork context -pub fn fork_context() -> ForkContext { +pub fn fork_context(fork_name: ForkName) -> ForkContext { let mut chain_spec = E::default_spec(); - // Set fork_epoch to `Some` to ensure that the `ForkContext` object - // includes altair in the list of forks - chain_spec.altair_fork_epoch = Some(types::Epoch::new(42)); - chain_spec.bellatrix_fork_epoch = Some(types::Epoch::new(84)); - ForkContext::new::(types::Slot::new(0), Hash256::zero(), &chain_spec) + let altair_fork_epoch = Epoch::new(1); + let merge_fork_epoch = Epoch::new(2); + + chain_spec.altair_fork_epoch = Some(altair_fork_epoch); + chain_spec.bellatrix_fork_epoch = Some(merge_fork_epoch); + + let current_slot = match fork_name { + ForkName::Base => Slot::new(0), + ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Merge => merge_fork_epoch.start_slot(E::slots_per_epoch()), + }; + ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } pub struct Libp2pInstance(LibP2PService, exit_future::Signal); @@ -90,6 +99,7 @@ pub async fn build_libp2p_instance( rt: Weak, boot_nodes: Vec, log: slog::Logger, + fork_name: ForkName, ) -> Libp2pInstance { let port = unused_tcp_port().unwrap(); let config = 
build_config(port, boot_nodes); @@ -101,7 +111,7 @@ pub async fn build_libp2p_instance( let libp2p_context = lighthouse_network::Context { config: &config, enr_fork_id: EnrForkId::default(), - fork_context: Arc::new(fork_context()), + fork_context: Arc::new(fork_context(fork_name)), chain_spec: &ChainSpec::minimal(), gossipsub_registry: None, }; @@ -125,10 +135,11 @@ pub async fn build_full_mesh( rt: Weak, log: slog::Logger, n: usize, + fork_name: ForkName, ) -> Vec { let mut nodes = Vec::with_capacity(n); for _ in 0..n { - nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone()).await); + nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name).await); } let multiaddrs: Vec = nodes .iter() @@ -154,12 +165,13 @@ pub async fn build_full_mesh( pub async fn build_node_pair( rt: Weak, log: &slog::Logger, + fork_name: ForkName, ) -> (Libp2pInstance, Libp2pInstance) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); - let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log).await; - let mut receiver = build_libp2p_instance(rt, vec![], receiver_log).await; + let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name).await; + let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name).await; let receiver_multiaddr = receiver.swarm.behaviour_mut().local_enr().multiaddr()[1].clone(); @@ -198,10 +210,15 @@ pub async fn build_node_pair( // Returns `n` peers in a linear topology #[allow(dead_code)] -pub async fn build_linear(rt: Weak, log: slog::Logger, n: usize) -> Vec { +pub async fn build_linear( + rt: Weak, + log: slog::Logger, + n: usize, + fork_name: ForkName, +) -> Vec { let mut nodes = Vec::with_capacity(n); for _ in 0..n { - nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone()).await); + nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name).await); } let multiaddrs: Vec = nodes diff --git 
a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 6f32e65263..5895d32d5d 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -12,7 +12,7 @@ use tokio::runtime::Runtime; use tokio::time::sleep; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, EthSpec, ForkContext, - Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, }; mod common; @@ -21,11 +21,11 @@ type E = MinimalEthSpec; /// Merge block with length < max_rpc_size. fn merge_block_small(fork_context: &ForkContext) -> BeaconBlock { - let mut block = BeaconBlockMerge::empty(&E::default_spec()); + let mut block = BeaconBlockMerge::::empty(&E::default_spec()); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(100).collect::>()); + let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); - block.body.execution_payload.transactions = txs; + block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context)); @@ -36,11 +36,11 @@ fn merge_block_small(fork_context: &ForkContext) -> BeaconBlock { /// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. 
fn merge_block_large(fork_context: &ForkContext) -> BeaconBlock { - let mut block = BeaconBlockMerge::empty(&E::default_spec()); + let mut block = BeaconBlockMerge::::empty(&E::default_spec()); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); - block.body.execution_payload.transactions = txs; + block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Merge(block); assert!(block.ssz_bytes_len() > max_rpc_size(fork_context)); @@ -61,7 +61,8 @@ fn test_status_rpc() { rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + let (mut sender, mut receiver) = + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; // Dummy STATUS RPC message let rpc_request = Request::Status(StatusMessage { @@ -159,7 +160,8 @@ fn test_blocks_by_range_chunked_rpc() { rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + let (mut sender, mut receiver) = + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { @@ -179,7 +181,7 @@ fn test_blocks_by_range_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRange(Some(Box::new(signed_full_block))); - let full_block = merge_block_small(&common::fork_context()); + let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_merge_small = Response::BlocksByRange(Some(Box::new(signed_full_block))); @@ -298,7 +300,8 @@ fn test_blocks_by_range_over_limit() { rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = 
common::build_node_pair(Arc::downgrade(&rt), &log).await; + let (mut sender, mut receiver) = + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { @@ -308,7 +311,7 @@ fn test_blocks_by_range_over_limit() { }); // BlocksByRange Response - let full_block = merge_block_large(&common::fork_context()); + let full_block = merge_block_large(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_merge_large = Response::BlocksByRange(Some(Box::new(signed_full_block))); @@ -395,7 +398,8 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + let (mut sender, mut receiver) = + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { @@ -526,7 +530,8 @@ fn test_blocks_by_range_single_empty_rpc() { rt.block_on(async { // get sender/receiver - let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + let (mut sender, mut receiver) = + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; // BlocksByRange Request let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { @@ -641,7 +646,8 @@ fn test_blocks_by_root_chunked_rpc() { let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver rt.block_on(async { - let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + let (mut sender, mut receiver) = + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await; // BlocksByRoot Request let rpc_request = Request::BlocksByRoot(BlocksByRootRequest { @@ -664,7 +670,7 @@ fn test_blocks_by_root_chunked_rpc() { let 
signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRoot(Some(Box::new(signed_full_block))); - let full_block = merge_block_small(&common::fork_context()); + let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_merge_small = Response::BlocksByRoot(Some(Box::new(signed_full_block))); @@ -779,7 +785,8 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver rt.block_on(async { - let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + let (mut sender, mut receiver) = + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; // BlocksByRoot Request let rpc_request = Request::BlocksByRoot(BlocksByRootRequest { @@ -916,7 +923,8 @@ fn test_goodbye_rpc() { let rt = Arc::new(Runtime::new().unwrap()); // get sender/receiver rt.block_on(async { - let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await; + let (mut sender, mut receiver) = + common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await; // build the sender future let sender_future = async { diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 96458da0a8..5aae8652e7 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -29,7 +29,7 @@ error-chain = "0.12.4" tokio = { version = "1.14.0", features = ["full"] } tokio-stream = "0.1.3" smallvec = "1.6.1" -rand = "0.7.3" +rand = "0.8.5" fnv = "1.0.7" rlp = "0.5.0" lazy_static = "1.4.0" @@ -41,5 +41,6 @@ itertools = "0.10.0" num_cpus = "1.13.0" lru_cache = { path = "../../common/lru_cache" } if-addrs = "0.6.4" -strum = "0.21.0" +strum = "0.24.0" tokio-util = { version = "0.6.3", features = ["time"] } +derivative = "2.2.0" diff --git 
a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index eb40be960d..3e25bd1442 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -42,6 +42,7 @@ use crate::sync::manager::BlockProcessType; use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; use beacon_chain::parking_lot::Mutex; use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock}; +use derivative::Derivative; use futures::stream::{Stream, StreamExt}; use futures::task::Poll; use lighthouse_network::{ @@ -51,7 +52,6 @@ use lighthouse_network::{ use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use std::collections::VecDeque; -use std::fmt; use std::pin::Pin; use std::sync::{Arc, Weak}; use std::task::Context; @@ -331,17 +331,13 @@ impl DuplicateCache { } /// An event to be processed by the manager task. +#[derive(Derivative)] +#[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct WorkEvent { drop_during_sync: bool, work: Work, } -impl fmt::Debug for WorkEvent { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - impl WorkEvent { /// Create a new `Work` event for some unaggregated attestation. pub fn unaggregated_attestation( @@ -615,7 +611,8 @@ impl std::convert::From> for WorkEvent { } /// A consensus message (or multiple) from the network that requires processing. 
-#[derive(Debug)] +#[derive(Derivative)] +#[derivative(Debug(bound = "T: BeaconChainTypes"))] pub enum Work { GossipAttestation { message_id: MessageId, @@ -1344,6 +1341,7 @@ impl BeaconProcessor { "worker" => worker_id, ); + let sub_executor = executor.clone(); executor.spawn_blocking( move || { let _worker_timer = worker_timer; @@ -1520,7 +1518,15 @@ impl BeaconProcessor { peer_id, request_id, request, - } => worker.handle_blocks_by_range_request(peer_id, request_id, request), + } => { + return worker.handle_blocks_by_range_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + } /* * Processing of blocks by roots requests from other peers. */ @@ -1528,7 +1534,15 @@ impl BeaconProcessor { peer_id, request_id, request, - } => worker.handle_blocks_by_root_request(peer_id, request_id, request), + } => { + return worker.handle_blocks_by_root_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + } Work::UnknownBlockAttestation { message_id, peer_id, diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 0f97bc7944..1c9d323576 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -20,7 +20,7 @@ use std::cmp; use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; -use tokio::runtime::Runtime; +use tokio::runtime::Handle; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, @@ -324,20 +324,19 @@ impl TestRig { .unwrap(); } - fn runtime(&mut self) -> Arc { + fn handle(&mut self) -> Handle { self.environment .as_mut() .unwrap() .core_context() .executor - .runtime() - .upgrade() + .handle() .unwrap() } /// Assert that the `BeaconProcessor` doesn't produce any events in the given `duration`. 
pub fn assert_no_events_for(&mut self, duration: Duration) { - self.runtime().block_on(async { + self.handle().block_on(async { tokio::select! { _ = tokio::time::sleep(duration) => (), event = self.work_journal_rx.recv() => panic!( @@ -360,7 +359,7 @@ impl TestRig { .iter() .all(|ev| ev != &WORKER_FREED && ev != &NOTHING_TO_DO)); - let (events, worker_freed_remaining) = self.runtime().block_on(async { + let (events, worker_freed_remaining) = self.handle().block_on(async { let mut events = Vec::with_capacity(expected.len()); let mut worker_freed_remaining = expected.len(); @@ -415,7 +414,7 @@ impl TestRig { /// We won't attempt to listen for any more than `expected.len()` events. As such, it makes sense /// to use the `NOTHING_TO_DO` event to ensure that execution has completed. pub fn assert_event_journal_with_timeout(&mut self, expected: &[&str], timeout: Duration) { - let events = self.runtime().block_on(async { + let events = self.handle().block_on(async { let mut events = Vec::with_capacity(expected.len()); let drain_future = async { diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index f79a655745..2d2196b9e9 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -1,4 +1,4 @@ -use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE; +use crate::beacon_processor::{worker::FUTURE_SLOT_TOLERANCE, SendOnDrop}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; @@ -9,6 +9,7 @@ use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; use slot_clock::SlotClock; +use task_executor::TaskExecutor; use types::{Epoch, EthSpec, Hash256, Slot}; use super::Worker; @@ -122,38 +123,71 @@ impl Worker { /// Handle a `BlocksByRoot` request from the peer. 
pub fn handle_blocks_by_root_request( - &self, + self, + executor: TaskExecutor, + send_on_drop: SendOnDrop, peer_id: PeerId, request_id: PeerRequestId, request: BlocksByRootRequest, ) { - let mut send_block_count = 0; - for root in request.block_roots.iter() { - if let Ok(Some(block)) = self.chain.get_block_checking_early_attester_cache(root) { - self.send_response( - peer_id, - Response::BlocksByRoot(Some(Box::new(block))), - request_id, - ); - send_block_count += 1; - } else { - debug!(self.log, "Peer requested unknown block"; + // Fetching blocks is async because it may have to hit the execution layer for payloads. + executor.spawn( + async move { + let mut send_block_count = 0; + for root in request.block_roots.iter() { + match self + .chain + .get_block_checking_early_attester_cache(root) + .await + { + Ok(Some(block)) => { + self.send_response( + peer_id, + Response::BlocksByRoot(Some(Box::new(block))), + request_id, + ); + send_block_count += 1; + } + Ok(None) => { + debug!( + self.log, + "Peer requested unknown block"; + "peer" => %peer_id, + "request_root" => ?root + ); + } + Err(e) => { + debug!( + self.log, + "Error fetching block for peer"; + "peer" => %peer_id, + "request_root" => ?root, + "error" => ?e, + ); + } + } + } + debug!( + self.log, + "Received BlocksByRoot Request"; "peer" => %peer_id, - "request_root" => ?root); - } - } - debug!(self.log, "Received BlocksByRoot Request"; - "peer" => %peer_id, - "requested" => request.block_roots.len(), - "returned" => send_block_count); + "requested" => request.block_roots.len(), + "returned" => send_block_count + ); - // send stream termination - self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + // send stream termination + self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + drop(send_on_drop); + }, + "load_blocks_by_root_blocks", + ) } /// Handle a `BlocksByRange` request from the peer. 
pub fn handle_blocks_by_range_request( - &self, + self, + executor: TaskExecutor, + send_on_drop: SendOnDrop, peer_id: PeerId, request_id: PeerRequestId, mut req: BlocksByRangeRequest, @@ -228,54 +262,84 @@ impl Worker { // remove all skip slots let block_roots = block_roots.into_iter().flatten().collect::>(); - let mut blocks_sent = 0; - for root in block_roots { - if let Ok(Some(block)) = self.chain.store.get_block(&root) { - // Due to skip slots, blocks could be out of the range, we ensure they are in the - // range before sending - if block.slot() >= req.start_slot - && block.slot() < req.start_slot + req.count * req.step - { - blocks_sent += 1; - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - response: Response::BlocksByRange(Some(Box::new(block))), - id: request_id, - }); + // Fetching blocks is async because it may have to hit the execution layer for payloads. + executor.spawn( + async move { + let mut blocks_sent = 0; + + for root in block_roots { + match self.chain.get_block(&root).await { + Ok(Some(block)) => { + // Due to skip slots, blocks could be out of the range, we ensure they + // are in the range before sending + if block.slot() >= req.start_slot + && block.slot() < req.start_slot + req.count * req.step + { + blocks_sent += 1; + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlocksByRange(Some(Box::new(block))), + id: request_id, + }); + } + } + Ok(None) => { + error!( + self.log, + "Block in the chain is not in the store"; + "request_root" => ?root + ); + break; + } + Err(e) => { + error!( + self.log, + "Error fetching block for peer"; + "block_root" => ?root, + "error" => ?e + ); + break; + } + } } - } else { - error!(self.log, "Block in the chain is not in the store"; - "request_root" => ?root); - } - } - let current_slot = self - .chain - .slot() - .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); + let current_slot = self + .chain + .slot() + .unwrap_or_else(|_| 
self.chain.slot_clock.genesis_slot()); - if blocks_sent < (req.count as usize) { - debug!(self.log, "BlocksByRange Response processed"; - "peer" => %peer_id, - "msg" => "Failed to return all requested blocks", - "start_slot" => req.start_slot, - "current_slot" => current_slot, - "requested" => req.count, - "returned" => blocks_sent); - } else { - debug!(self.log, "BlocksByRange Response processed"; - "peer" => %peer_id, - "start_slot" => req.start_slot, - "current_slot" => current_slot, - "requested" => req.count, - "returned" => blocks_sent); - } + if blocks_sent < (req.count as usize) { + debug!( + self.log, + "BlocksByRange Response processed"; + "peer" => %peer_id, + "msg" => "Failed to return all requested blocks", + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => blocks_sent + ); + } else { + debug!( + self.log, + "BlocksByRange Response processed"; + "peer" => %peer_id, + "start_slot" => req.start_slot, + "current_slot" => current_slot, + "requested" => req.count, + "returned" => blocks_sent + ); + } - // send the stream terminator - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - response: Response::BlocksByRange(None), - id: request_id, - }); + // send the stream terminator + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlocksByRange(None), + id: request_id, + }); + drop(send_on_drop); + }, + "load_blocks_by_range_blocks", + ); } } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 082808f88e..943ee9cdaf 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -138,7 +138,7 @@ impl Worker { let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); - match 
self.process_backfill_blocks(&downloaded_blocks) { + match self.process_backfill_blocks(downloaded_blocks) { (_, Ok(_)) => { debug!(self.log, "Backfill batch processed"; "batch_epoch" => epoch, @@ -223,9 +223,10 @@ impl Worker { /// Helper function to process backfill block batches which only consumes the chain and blocks to process. fn process_backfill_blocks( &self, - blocks: &[SignedBeaconBlock], + blocks: Vec>, ) -> (usize, Result<(), ChainSegmentFailed>) { - match self.chain.import_historical_block_batch(blocks) { + let blinded_blocks = blocks.into_iter().map(Into::into).collect(); + match self.chain.import_historical_block_batch(blinded_blocks) { Ok(imported_blocks) => { metrics::inc_counter( &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_SUCCESS_TOTAL, diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 04aa514721..02c491cb01 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -9,7 +9,6 @@ use lighthouse_network::{ Gossipsub, NetworkGlobals, }; use std::sync::Arc; -use strum::AsStaticRef; use strum::IntoEnumIterator; use types::EthSpec; @@ -357,12 +356,12 @@ pub fn update_gossip_metrics( for client_kind in ClientKind::iter() { set_gauge_vec( &BEACON_BLOCK_MESH_PEERS_PER_CLIENT, - &[&client_kind.to_string()], + &[client_kind.as_ref()], 0_i64, ); set_gauge_vec( &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, - &[&client_kind.to_string()], + &[client_kind.as_ref()], 0_i64, ); } @@ -377,7 +376,7 @@ pub fn update_gossip_metrics( .peers .read() .peer_info(peer_id) - .map(|peer_info| peer_info.client().kind.as_static()) + .map(|peer_info| peer_info.client().kind.into()) .unwrap_or_else(|| "Unknown"); if let Some(v) = get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client]) @@ -392,7 +391,7 @@ pub fn update_gossip_metrics( .peers .read() .peer_info(peer_id) - .map(|peer_info| peer_info.client().kind.as_static()) + .map(|peer_info| peer_info.client().kind.into()) 
.unwrap_or_else(|| "Unknown"); if let Some(v) = get_int_gauge( &BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT, diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index b11dc1c7af..ece923ef59 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -4,11 +4,10 @@ use std::time::Duration; use beacon_chain::{BeaconChainTypes, BlockError}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; -use lru_cache::LRUCache; +use lru_cache::LRUTimeCache; use slog::{crit, debug, error, trace, warn, Logger}; use smallvec::SmallVec; use store::{Hash256, SignedBeaconBlock}; -use strum::AsStaticRef; use tokio::sync::mpsc; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; @@ -30,7 +29,7 @@ mod single_block_lookup; #[cfg(test)] mod tests; -const FAILED_CHAINS_CACHE_SIZE: usize = 500; +const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; pub(crate) struct BlockLookups { @@ -38,7 +37,7 @@ pub(crate) struct BlockLookups { parent_queue: SmallVec<[ParentLookup; 3]>, /// A cache of failed chain lookups to prevent duplicate searches. - failed_chains: LRUCache, + failed_chains: LRUTimeCache, /// A collection of block hashes being searched for and a flag indicating if a result has been /// received or not. @@ -57,7 +56,9 @@ impl BlockLookups { pub fn new(beacon_processor_send: mpsc::Sender>, log: Logger) -> Self { Self { parent_queue: Default::default(), - failed_chains: LRUCache::new(FAILED_CHAINS_CACHE_SIZE), + failed_chains: LRUTimeCache::new(Duration::from_secs( + FAILED_CHAINS_CACHE_EXPIRY_SECONDS, + )), single_block_lookups: Default::default(), beacon_processor_send, log, @@ -176,7 +177,7 @@ impl BlockLookups { // request finished correctly, it will be removed after the block is processed. 
} Err(error) => { - let msg: &str = error.as_static(); + let msg: &str = error.into(); cx.report_peer(peer_id, PeerAction::LowToleranceError, msg); // Remove the request, if it can be retried it will be added with a new id. let mut req = request.remove(); @@ -219,7 +220,7 @@ impl BlockLookups { return; }; - match parent_lookup.verify_block(block, &self.failed_chains) { + match parent_lookup.verify_block(block, &mut self.failed_chains) { Ok(Some(block)) => { // Block is correct, send to the beacon processor. let chain_hash = parent_lookup.chain_hash(); @@ -243,7 +244,7 @@ impl BlockLookups { VerifyError::RootMismatch | VerifyError::NoBlockReturned | VerifyError::ExtraBlocksReturned => { - let e = e.as_static(); + let e = e.into(); warn!(self.log, "Peer sent invalid response to parent request."; "peer_id" => %peer_id, "reason" => e); @@ -310,8 +311,13 @@ impl BlockLookups { } } Err(e) => { - trace!(self.log, "Single block request failed on peer disconnection"; - "block_root" => %req.hash, "peer_id" => %peer_id, "reason" => e.as_static()); + trace!( + self.log, + "Single block request failed on peer disconnection"; + "block_root" => %req.hash, + "peer_id" => %peer_id, + "reason" => <&str>::from(e), + ); } } } @@ -402,8 +408,8 @@ impl BlockLookups { trace!(self.log, "Single block processing succeeded"; "block" => %root); } - match result { - Err(e) => match e { + if let Err(e) = result { + match e { BlockError::BlockIsAlreadyKnown => { // No error here } @@ -431,9 +437,6 @@ impl BlockLookups { } } } - }, - Ok(()) => { - // No error here } } diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index eb8d61ab9e..a9a3c34bc0 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,6 +1,6 @@ use lighthouse_network::PeerId; use store::{EthSpec, Hash256, SignedBeaconBlock}; -use strum::AsStaticStr; +use 
strum::IntoStaticStr; use crate::sync::{ manager::{Id, SLOT_IMPORT_TOLERANCE}, @@ -28,7 +28,7 @@ pub(crate) struct ParentLookup { current_parent_request_id: Option, } -#[derive(Debug, PartialEq, Eq, AsStaticStr)] +#[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum VerifyError { RootMismatch, NoBlockReturned, @@ -117,7 +117,7 @@ impl ParentLookup { pub fn verify_block( &mut self, block: Option>>, - failed_chains: &lru_cache::LRUCache, + failed_chains: &mut lru_cache::LRUTimeCache, ) -> Result>>, VerifyError> { let block = self.current_parent_request.verify_block(block)?; diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index a4df616cbb..347a4ae437 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -4,7 +4,7 @@ use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use rand::seq::IteratorRandom; use ssz_types::VariableList; use store::{EthSpec, Hash256, SignedBeaconBlock}; -use strum::AsStaticStr; +use strum::IntoStaticStr; /// Object representing a single block lookup request. 
#[derive(PartialEq, Eq)] @@ -28,14 +28,14 @@ pub enum State { Processing { peer_id: PeerId }, } -#[derive(Debug, PartialEq, Eq, AsStaticStr)] +#[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum VerifyError { RootMismatch, NoBlockReturned, ExtraBlocksReturned, } -#[derive(Debug, PartialEq, Eq, AsStaticStr)] +#[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupRequestError { TooManyAttempts, NoPeers, diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 3f81647217..9f4142dd66 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -26,8 +26,9 @@ const BATCH_BUFFER_SIZE: u8 = 5; /// A return type for functions that act on a `Chain` which informs the caller whether the chain /// has been completed and should be removed or to be kept if further processing is /// required. -#[must_use = "Should be checked, since a failed chain must be removed. A chain that requested - being removed and continued is now in an inconsistent state"] +/// +/// Should be checked, since a failed chain must be removed. A chain that requested being removed +/// and continued is now in an inconsistent state. 
pub type ProcessingResult = Result; /// Reasons for removing a chain diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 185fc204ac..9953df81d0 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -49,13 +49,18 @@ use crate::sync::manager::Id; use crate::sync::network_context::SyncNetworkContext; use crate::sync::BatchProcessResult; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; -use slog::{crit, debug, error, trace}; +use lru_cache::LRUTimeCache; +use slog::{crit, debug, error, trace, warn}; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::mpsc; -use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; +use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; + +/// For how long we store failed finalized chains to prevent retries. +const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; /// The primary object dealing with long range/batch syncing. This contains all the active and /// non-active chains that need to be processed before the syncing is considered complete. This @@ -69,6 +74,8 @@ pub struct RangeSync> { /// A collection of chains that need to be downloaded. This stores any head or finalized chains /// that need to be downloaded. chains: ChainCollection, + /// Chains that have failed and are stored to prevent being retried. + failed_chains: LRUTimeCache, /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. beacon_processor_send: mpsc::Sender>, /// The syncing logger. 
@@ -88,6 +95,9 @@ where RangeSync { beacon_chain: beacon_chain.clone(), chains: ChainCollection::new(beacon_chain, log.clone()), + failed_chains: LRUTimeCache::new(std::time::Duration::from_secs( + FAILED_CHAINS_EXPIRY_SECONDS, + )), awaiting_head_peers: HashMap::new(), beacon_processor_send, log, @@ -128,6 +138,14 @@ where // determine which kind of sync to perform and set up the chains match RangeSyncType::new(self.beacon_chain.as_ref(), &local_info, &remote_info) { RangeSyncType::Finalized => { + // Make sure we have not recently tried this chain + if self.failed_chains.contains(&remote_info.finalized_root) { + debug!(self.log, "Disconnecting peer that belongs to previously failed chain"; + "failed_root" => %remote_info.finalized_root, "peer_id" => %peer_id); + network.goodbye_peer(peer_id, GoodbyeReason::IrrelevantNetwork); + return; + } + // Finalized chain search debug!(self.log, "Finalization sync peer joined"; "peer_id" => %peer_id); self.awaiting_head_peers.remove(&peer_id); @@ -338,6 +356,13 @@ where debug!(self.log, "Chain removed"; "sync_type" => ?sync_type, &chain, "reason" => ?remove_reason, "op" => op); } + if let RemoveChain::ChainFailed(_) = remove_reason { + if RangeSyncType::Finalized == sync_type { + warn!(self.log, "Chain failed! 
Syncing to its head won't be retried for at least the next {} seconds", FAILED_CHAINS_EXPIRY_SECONDS; &chain); + self.failed_chains.insert(chain.target_head_root); + } + } + network.status_peers(self.beacon_chain.as_ref(), chain.peers()); let local = match self.beacon_chain.status_message() { diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 361c0a07fc..84d23a4562 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -9,7 +9,7 @@ derivative = "2.1.1" itertools = "0.10.0" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -parking_lot = "0.11.0" +parking_lot = "0.12.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } eth2_ssz = "0.4.1" diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index dd547017d7..a6bb04d7b9 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -9,9 +9,7 @@ mod sync_aggregate_id; pub use attestation::AttMaxCover; pub use max_cover::MaxCover; -pub use persistence::{ - PersistedOperationPool, PersistedOperationPoolAltair, PersistedOperationPoolBase, -}; +pub use persistence::{PersistedOperationPool, PersistedOperationPoolAltair}; pub use reward_cache::RewardCache; use crate::sync_aggregate_id::SyncAggregateId; diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index d79e38fee3..84178d1309 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -17,7 +17,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { } impl PersistedOperationPool { - /// Convert an `OperationPool` into serializable form. 
Always converts to - /// `PersistedOperationPool::Altair` because the v3 to v4 database schema migration ensures - /// the op pool is always persisted as the Altair variant. + /// Convert an `OperationPool` into serializable form. pub fn from_operation_pool(operation_pool: &OperationPool) -> Self { let attestations = operation_pool .attestations @@ -114,15 +112,6 @@ impl PersistedOperationPool { .collect(), ); let op_pool = match self { - PersistedOperationPool::Base(_) => OperationPool { - attestations, - sync_contributions: <_>::default(), - attester_slashings, - proposer_slashings, - voluntary_exits, - reward_cache: Default::default(), - _phantom: Default::default(), - }, PersistedOperationPool::Altair(_) => { let sync_contributions = RwLock::new(self.sync_contributions()?.iter().cloned().collect()); @@ -140,44 +129,9 @@ impl PersistedOperationPool { }; Ok(op_pool) } - - /// Convert the `PersistedOperationPool::Base` variant to `PersistedOperationPool::Altair` by - /// setting `sync_contributions` to its default. - pub fn base_to_altair(self) -> Self { - match self { - PersistedOperationPool::Base(_) => { - PersistedOperationPool::Altair(PersistedOperationPoolAltair { - attestations: self.attestations().to_vec(), - sync_contributions: <_>::default(), - attester_slashings: self.attester_slashings().to_vec(), - proposer_slashings: self.proposer_slashings().to_vec(), - voluntary_exits: self.voluntary_exits().to_vec(), - }) - } - PersistedOperationPool::Altair(_) => self, - } - } } -/// This `StoreItem` implementation is necessary for migrating the `PersistedOperationPool` -/// in the v3 to v4 database schema migration. 
-impl StoreItem for PersistedOperationPoolBase { - fn db_column() -> DBColumn { - DBColumn::OpPool - } - - fn as_store_bytes(&self) -> Result, StoreError> { - Ok(self.as_ssz_bytes()) - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } -} - -/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::Altair` -/// because the v3 to v4 database schema migration ensures the persisted op pool is always stored -/// in the Altair format. +/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::Altair`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { DBColumn::OpPool diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index a1d9512346..bc03d003ed 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -438,6 +438,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .requires("merge") .takes_value(true) ) + .arg( + Arg::with_name("payload-builders") + .long("payload-builders") + .help("The URL of a service compatible with the MEV-boost API.") + .requires("merge") + .takes_value(true) + ) /* * Database. @@ -448,7 +455,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("SLOT_COUNT") .help("Specifies how often a freezer DB restore point should be stored. \ Cannot be changed after initialization. \ - [default: 2048 (mainnet) or 64 (minimal)]") + [default: 8192 (mainnet) or 64 (minimal)]") .takes_value(true) ) .arg( @@ -693,4 +700,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { experimental as it may obscure performance issues.") .takes_value(false) ) + .arg( + Arg::with_name("fork-choice-before-proposal-timeout") + .long("fork-choice-before-proposal-timeout") + .help("Set the maximum number of milliseconds to wait for fork choice before \ + proposing a block. 
You can prevent waiting at all by setting the timeout \ + to 0, however you risk proposing atop the wrong parent block.") + .default_value("250") + .takes_value(true) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 8c30dcfddb..ca266829de 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -250,6 +250,14 @@ pub fn get_config( el_config.execution_endpoints = client_config.eth1.endpoints.clone(); } + if let Some(endpoints) = cli_args.value_of("payload-builders") { + el_config.builder_endpoints = endpoints + .split(',') + .map(SensitiveUrl::parse) + .collect::>() + .map_err(|e| format!("payload-builders contains an invalid URL {:?}", e))?; + } + if let Some(secrets) = cli_args.value_of("jwt-secrets") { let secret_files: Vec<_> = secrets.split(',').map(PathBuf::from).collect(); if !secret_files.is_empty() && secret_files.len() != el_config.execution_endpoints.len() @@ -276,7 +284,9 @@ pub fn get_config( client_config.freezer_db_path = Some(PathBuf::from(freezer_dir)); } - client_config.store.slots_per_restore_point = get_slots_per_restore_point::(cli_args)?; + let (sprp, sprp_explicit) = get_slots_per_restore_point::(cli_args)?; + client_config.store.slots_per_restore_point = sprp; + client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; if let Some(block_cache_size) = clap_utils::parse_optional(cli_args, "block-cache-size")? { client_config.store.block_cache_size = block_cache_size; @@ -575,6 +585,12 @@ pub fn get_config( client_config.chain.enable_lock_timeouts = false; } + if let Some(timeout) = + clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")? + { + client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; + } + Ok(client_config) } @@ -809,15 +825,20 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { } /// Get the `slots_per_restore_point` value to use for the database. 
-pub fn get_slots_per_restore_point(cli_args: &ArgMatches) -> Result { +/// +/// Return `(sprp, set_explicitly)` where `set_explicitly` is `true` if the user provided the value. +pub fn get_slots_per_restore_point( + cli_args: &ArgMatches, +) -> Result<(u64, bool), String> { if let Some(slots_per_restore_point) = clap_utils::parse_optional(cli_args, "slots-per-restore-point")? { - Ok(slots_per_restore_point) + Ok((slots_per_restore_point, true)) } else { - Ok(std::cmp::min( + let default = std::cmp::min( E::slots_per_historical_root() as u64, store::config::DEFAULT_SLOTS_PER_RESTORE_POINT, - )) + ); + Ok((default, false)) } } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index e94a8403d4..f3bef5c027 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -11,7 +11,7 @@ beacon_chain = {path = "../beacon_chain"} [dependencies] db-key = "0.0.5" leveldb = { version = "0.8.6", default-features = false } -parking_lot = "0.11.0" +parking_lot = "0.12.0" itertools = "0.10.0" eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" @@ -29,4 +29,4 @@ directory = { path = "../../common/directory" } tree_hash = "0.4.0" take-until = "0.1.0" zstd = "0.10.0" -strum = { version = "0.24", features = ["derive"] } +strum = { version = "0.24.0", features = ["derive"] } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index abb164ad2f..63419d89a3 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -4,7 +4,8 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{EthSpec, MinimalEthSpec}; -pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; +pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; +pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 64; pub const DEFAULT_STATE_CACHE_SIZE: usize = 128; pub const DEFAULT_COMPRESSION_LEVEL: i32 = 1; @@ -15,6 +16,8 @@ const EST_COMPRESSION_FACTOR: usize = 2; pub struct 
StoreConfig { /// Number of slots to wait between storing restore points in the freezer database. pub slots_per_restore_point: u64, + /// Flag indicating whether the `slots_per_restore_point` was set explicitly by the user. + pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: usize, /// Maximum number of states to store in the in-memory state cache. @@ -44,6 +47,7 @@ impl Default for StoreConfig { Self { // Safe default for tests, shouldn't ever be read by a CLI node. slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, + slots_per_restore_point_set_explicitly: false, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, state_cache_size: DEFAULT_STATE_CACHE_SIZE, compression_level: DEFAULT_COMPRESSION_LEVEL, diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 76f273f902..2e4a866821 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -54,6 +54,10 @@ pub enum Error { state_root: Hash256, slot: Slot, }, + AddPayloadLogicError, + ResyncRequiredForExecutionPayloadSeparation, + SlotClockUnavailableForMigration, + V9MigrationFailure(Hash256), } pub trait HandleUnavailable { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index bf71296c59..c94070142f 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1,7 +1,10 @@ use crate::chunked_vector::{ store_updated_vector, BlockRoots, HistoricalRoots, RandaoMixes, StateRoots, }; -use crate::config::{OnDiskStoreConfig, StoreConfig}; +use crate::config::{ + OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT, + PREV_DEFAULT_SLOTS_PER_RESTORE_POINT, +}; use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator}; use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::iter::{ParentRootBlockIterator, 
StateRootsIterator}; @@ -16,8 +19,8 @@ use crate::metadata::{ use crate::metrics; use crate::state_cache::StateCache; use crate::{ - get_key_for_col, DBColumn, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, - StoreOp, + get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, + PartialBeaconState, StoreItem, StoreOp, }; use leveldb::iterator::LevelDBIterator; use lru::LruCache; @@ -92,6 +95,8 @@ pub enum HotColdDBError { MissingPrevState(Hash256), MissingSplitState(Hash256, Slot), MissingStateDiff(Hash256), + MissingExecutionPayload(Hash256), + MissingFullBlockExecutionPayloadPruned(Hash256, Slot), MissingAnchorInfo, HotStateSummaryError(BeaconStateError), RestorePointDecodeError(ssz::DecodeError), @@ -159,7 +164,7 @@ impl HotColdDB, LevelDB> { Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; config.verify_compression_level()?; - let db = Arc::new(HotColdDB { + let mut db = HotColdDB { split: RwLock::new(Split::default()), anchor_info: RwLock::new(None), cold_db: LevelDB::open(cold_path)?, @@ -170,10 +175,46 @@ impl HotColdDB, LevelDB> { spec, log, _phantom: PhantomData, - }); + }; + + // Allow the slots-per-restore-point value to stay at the previous default if the config + // uses the new default. Don't error on a failed read because the config itself may need + // migrating. + if let Ok(Some(disk_config)) = db.load_config() { + if !db.config.slots_per_restore_point_set_explicitly + && disk_config.slots_per_restore_point == PREV_DEFAULT_SLOTS_PER_RESTORE_POINT + && db.config.slots_per_restore_point == DEFAULT_SLOTS_PER_RESTORE_POINT + { + debug!( + db.log, + "Ignoring slots-per-restore-point config in favour of on-disk value"; + "config" => db.config.slots_per_restore_point, + "on_disk" => disk_config.slots_per_restore_point, + ); + + // Mutate the in-memory config so that it's compatible. 
+ db.config.slots_per_restore_point = PREV_DEFAULT_SLOTS_PER_RESTORE_POINT; + } + } + + // Load the previous split slot from the database (if any). This ensures we can + // stop and restart correctly. This needs to occur *before* running any migrations + // because some migrations load states and depend on the split. + if let Some(split) = db.load_split()? { + *db.split.write() = split; + *db.anchor_info.write() = db.load_anchor_info()?; + + info!( + db.log, + "Hot-Cold DB initialized"; + "split_slot" => split.slot, + "split_state" => ?split.state_root + ); + } // Ensure that the schema version of the on-disk database matches the software. // If the version is mismatched, an automatic migration will be attempted. + let db = Arc::new(db); if let Some(schema_version) = db.load_schema_version()? { debug!( db.log, @@ -192,21 +233,6 @@ impl HotColdDB, LevelDB> { } db.store_config()?; - // Load the previous split slot from the database (if any). This ensures we can - // stop and restart correctly. - if let Some(split) = db.load_split()? { - *db.split.write() = split; - *db.anchor_info.write() = db.load_anchor_info()?; - - info!( - db.log, - "Hot-Cold DB initialized"; - "version" => CURRENT_SCHEMA_VERSION.as_u64(), - "split_slot" => split.slot, - "split_state" => format!("{:?}", split.state_root) - ); - } - // Run a garbage collection pass. db.remove_garbage()?; @@ -266,53 +292,150 @@ impl, Cold: ItemStore> HotColdDB block: SignedBeaconBlock, ) -> Result<(), Error> { // Store on disk. - let op = self.block_as_kv_store_op(block_root, &block); - self.hot_db.do_atomically(vec![op])?; - + let mut ops = Vec::with_capacity(2); + let block = self.block_as_kv_store_ops(block_root, block, &mut ops)?; + self.hot_db.do_atomically(ops)?; // Update cache. self.block_cache.lock().put(*block_root, block); - Ok(()) } /// Prepare a signed beacon block for storage in the database. - pub fn block_as_kv_store_op( + /// + /// Return the original block for re-use after storage. 
It's passed by value so it can be + cracked open and have its payload extracted. + pub fn block_as_kv_store_ops( &self, key: &Hash256, - block: &SignedBeaconBlock, - ) -> KeyValueStoreOp { - // FIXME(altair): re-add block write/overhead metrics, or remove them - let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_bytes()); - KeyValueStoreOp::PutKeyValue(db_key, block.as_ssz_bytes()) + block: SignedBeaconBlock, + ops: &mut Vec, + ) -> Result, Error> { + // Split block into blinded block and execution payload. + let (blinded_block, payload) = block.into(); + + // Store blinded block. + self.blinded_block_as_kv_store_ops(key, &blinded_block, ops); + + // Store execution payload if present. + if let Some(ref execution_payload) = payload { + ops.push(execution_payload.as_kv_store_op(*key)?); + } + + // Re-construct block. This should always succeed. + blinded_block + .try_into_full_block(payload) + .ok_or(Error::AddPayloadLogicError) } - /// Fetch a block from the store. - pub fn get_block(&self, block_root: &Hash256) -> Result>, Error> { + /// Prepare a signed beacon block for storage in the database *without* its payload. + pub fn blinded_block_as_kv_store_ops( + &self, + key: &Hash256, + blinded_block: &SignedBeaconBlock>, + ops: &mut Vec, + ) { + let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_bytes()); + ops.push(KeyValueStoreOp::PutKeyValue( + db_key, + blinded_block.as_ssz_bytes(), + )); + } + + pub fn try_get_full_block( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT); // Check the cache. if let Some(block) = self.block_cache.lock().get(block_root) { metrics::inc_counter(&metrics::BEACON_BLOCK_CACHE_HIT_COUNT); - return Ok(Some(block.clone())); + return Ok(Some(DatabaseBlock::Full(block.clone()))); } - let block = self.get_block_with(block_root, |bytes| { - SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec) - })?; + // Load the blinded block.
+ let blinded_block = match self.get_blinded_block(block_root)? { + Some(block) => block, + None => return Ok(None), + }; - // Add to cache. - if let Some(ref block) = block { - self.block_cache.lock().put(*block_root, block.clone()); - } + // If the block is after the split point then we should have the full execution payload + // stored in the database. Otherwise, just return the blinded block. + // Hold the split lock so that it can't change. + let split = self.split.read_recursive(); - Ok(block) + let block = if blinded_block.message().execution_payload().is_err() + || blinded_block.slot() >= split.slot + { + // Re-constructing the full block should always succeed here. + let full_block = self.make_full_block(block_root, blinded_block)?; + + // Add to cache. + self.block_cache.lock().put(*block_root, full_block.clone()); + + DatabaseBlock::Full(full_block) + } else { + DatabaseBlock::Blinded(blinded_block) + }; + drop(split); + + Ok(Some(block)) } - /// Fetch a block from the store, ignoring which fork variant it *should* be for. - pub fn get_block_any_variant( + /// Fetch a full block with execution payload from the store. + pub fn get_full_block( &self, block_root: &Hash256, ) -> Result>, Error> { + match self.try_get_full_block(block_root)? { + Some(DatabaseBlock::Full(block)) => Ok(Some(block)), + Some(DatabaseBlock::Blinded(block)) => Err( + HotColdDBError::MissingFullBlockExecutionPayloadPruned(*block_root, block.slot()) + .into(), + ), + None => Ok(None), + } + } + + /// Get a schema V8 or earlier full block by reading it and its payload from disk. + pub fn get_full_block_prior_to_v9( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + self.get_block_with(block_root, |bytes| { + SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec) + }) + } + + /// Convert a blinded block into a full block by loading its execution payload if necessary. 
+ pub fn make_full_block( + &self, + block_root: &Hash256, + blinded_block: SignedBeaconBlock>, + ) -> Result, Error> { + if blinded_block.message().execution_payload().is_ok() { + let execution_payload = self.get_execution_payload(block_root)?; + blinded_block.try_into_full_block(Some(execution_payload)) + } else { + blinded_block.try_into_full_block(None) + } + .ok_or(Error::AddPayloadLogicError) + } + + pub fn get_blinded_block( + &self, + block_root: &Hash256, + ) -> Result>>, Error> { + self.get_block_with(block_root, |bytes| { + SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec) + }) + } + + /// Fetch a block from the store, ignoring which fork variant it *should* be for. + pub fn get_block_any_variant>( + &self, + block_root: &Hash256, + ) -> Result>, Error> { self.get_block_with(block_root, SignedBeaconBlock::any_from_ssz_bytes) } @@ -320,11 +443,11 @@ impl, Cold: ItemStore> HotColdDB /// /// This is useful for e.g. ignoring the slot-indicated fork to forcefully load a block as if it /// were for a different fork. - pub fn get_block_with( + pub fn get_block_with>( &self, block_root: &Hash256, - decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, - ) -> Result>, Error> { + decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, + ) -> Result>, Error> { self.hot_db .get_bytes(DBColumn::BeaconBlock.into(), block_root.as_bytes())? .map(|block_bytes| decoder(&block_bytes)) @@ -332,6 +455,15 @@ impl, Cold: ItemStore> HotColdDB .map_err(|e| e.into()) } + /// Load the execution payload for a block from disk. + pub fn get_execution_payload( + &self, + block_root: &Hash256, + ) -> Result, Error> { + self.get_item(block_root)? + .ok_or_else(|| HotColdDBError::MissingExecutionPayload(*block_root).into()) + } + /// Determine whether a block exists in the database. 
pub fn block_exists(&self, block_root: &Hash256) -> Result { self.hot_db @@ -342,7 +474,9 @@ impl, Cold: ItemStore> HotColdDB pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { self.block_cache.lock().pop(block_root); self.hot_db - .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes()) + .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())?; + self.hot_db + .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes()) } pub fn put_state_summary( @@ -499,7 +633,7 @@ impl, Cold: ItemStore> HotColdDB for op in batch { match op { StoreOp::PutBlock(block_root, block) => { - key_value_batch.push(self.block_as_kv_store_op(&block_root, &block)); + self.block_as_kv_store_ops(&block_root, *block, &mut key_value_batch)?; } StoreOp::PutState(state_root, state) => { @@ -541,12 +675,18 @@ impl, Cold: ItemStore> HotColdDB } } StoreOp::KeyValueOp(kv_op) => key_value_batch.push(kv_op), + StoreOp::DeleteExecutionPayload(block_root) => { + let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_bytes()); + key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + } } } Ok(key_value_batch) } pub fn do_atomically(&self, batch: Vec>) -> Result<(), Error> { + // Update the block cache whilst holding a lock, to ensure that the cache updates atomically + // with the database. let mut block_cache = self.block_cache.lock(); for op in &batch { @@ -571,8 +711,11 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::KeyValueOp(_) => (), + + StoreOp::DeleteExecutionPayload(_) => (), } } + self.hot_db .do_atomically(self.convert_to_kv_batch(batch)?)?; drop(block_cache); @@ -713,7 +856,7 @@ impl, Cold: ItemStore> HotColdDB }) = self.load_hot_state_summary(state_root)? { // Load the latest block, and use it to confirm the validity of this state. - let latest_block = if let Some(block) = self.get_block(&latest_block_root)? { + let latest_block = if let Some(block) = self.get_blinded_block(&latest_block_root)? 
{ block } else { // Dangling state, will be deleted fully once finalization advances past it. @@ -989,34 +1132,33 @@ impl, Cold: ItemStore> HotColdDB start_slot: Slot, end_slot: Slot, end_block_hash: Hash256, - ) -> Result>, Error> { - let mut blocks: Vec> = - ParentRootBlockIterator::new(self, end_block_hash) - .map(|result| result.map(|(_, block)| block)) - // Include the block at the end slot (if any), it needs to be - // replayed in order to construct the canonical state at `end_slot`. - .filter(|result| { - result - .as_ref() - .map_or(true, |block| block.slot() <= end_slot) - }) - // Include the block at the start slot (if any). Whilst it doesn't need to be - // applied to the state, it contains a potentially useful state root. - // - // Return `true` on an `Err` so that the `collect` fails, unless the error is a - // `BlockNotFound` error and some blocks are intentionally missing from the DB. - // This complexity is unfortunately necessary to avoid loading the parent of the - // oldest known block -- we can't know that we have all the required blocks until we - // load a block with slot less than the start slot, which is impossible if there are - // no blocks with slot less than the start slot. - .take_while(|result| match result { - Ok(block) => block.slot() >= start_slot, - Err(Error::BlockNotFound(_)) => { - self.get_oldest_block_slot() == self.spec.genesis_slot - } - Err(_) => true, - }) - .collect::>()?; + ) -> Result>>, Error> { + let mut blocks = ParentRootBlockIterator::new(self, end_block_hash) + .map(|result| result.map(|(_, block)| block)) + // Include the block at the end slot (if any), it needs to be + // replayed in order to construct the canonical state at `end_slot`. + .filter(|result| { + result + .as_ref() + .map_or(true, |block| block.slot() <= end_slot) + }) + // Include the block at the start slot (if any). Whilst it doesn't need to be + // applied to the state, it contains a potentially useful state root. 
+ // + // Return `true` on an `Err` so that the `collect` fails, unless the error is a + // `BlockNotFound` error and some blocks are intentionally missing from the DB. + // This complexity is unfortunately necessary to avoid loading the parent of the + // oldest known block -- we can't know that we have all the required blocks until we + // load a block with slot less than the start slot, which is impossible if there are + // no blocks with slot less than the start slot. + .take_while(|result| match result { + Ok(block) => block.slot() >= start_slot, + Err(Error::BlockNotFound(_)) => { + self.get_oldest_block_slot() == self.spec.genesis_slot + } + Err(_) => true, + }) + .collect::, _>>()?; blocks.reverse(); Ok(blocks) } @@ -1028,7 +1170,7 @@ impl, Cold: ItemStore> HotColdDB pub fn replay_blocks( &self, state: BeaconState, - blocks: Vec>, + blocks: Vec>>, target_slot: Slot, state_root_iter: impl Iterator>, ) -> Result, Error> { @@ -1048,6 +1190,11 @@ impl, Cold: ItemStore> HotColdDB }) } + /// Get a reference to the `ChainSpec` used by the database. + pub fn get_chain_spec(&self) -> &ChainSpec { + &self.spec + } + /// Fetch a copy of the current split slot from memory. pub fn get_split_slot(&self) -> Slot { self.split.read_recursive().slot @@ -1227,6 +1374,11 @@ impl, Cold: ItemStore> HotColdDB .map_or(self.spec.genesis_slot, |anchor| anchor.oldest_block_slot) } + /// Return the in-memory configuration used by the database. + pub fn get_config(&self) -> &StoreConfig { + &self.config + } + /// Load previously-stored config from disk. 
fn load_config(&self) -> Result, Error> { self.hot_db.get(&CONFIG_KEY) diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index 1b442cbc55..736585a72a 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -1 +1,2 @@ pub mod beacon_state; +pub mod execution_payload; diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs new file mode 100644 index 0000000000..b6843c3a63 --- /dev/null +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -0,0 +1,17 @@ +use crate::{DBColumn, Error, StoreItem}; +use ssz::{Decode, Encode}; +use types::{EthSpec, ExecutionPayload}; + +impl StoreItem for ExecutionPayload { + fn db_column() -> DBColumn { + DBColumn::ExecPayload + } + + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } +} diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 0852622b6d..01a6a1b145 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -3,7 +3,8 @@ use crate::{Error, HotColdDB, ItemStore}; use std::borrow::Cow; use std::marker::PhantomData; use types::{ - typenum::Unsigned, BeaconState, BeaconStateError, EthSpec, Hash256, SignedBeaconBlock, Slot, + typenum::Unsigned, BeaconState, BeaconStateError, BlindedPayload, EthSpec, Hash256, + SignedBeaconBlock, Slot, }; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. @@ -188,7 +189,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, block_hash: Hash256, ) -> Result { let block = store - .get_block(&block_hash)? + .get_blinded_block(&block_hash)? .ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?; let state = store .get_state(&block.state_root(), Some(block.slot()))? 
@@ -272,7 +273,10 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> } } - fn do_next(&mut self) -> Result)>, Error> { + #[allow(clippy::type_complexity)] + fn do_next( + &mut self, + ) -> Result>)>, Error> { // Stop once we reach the zero parent, otherwise we'll keep returning the genesis // block forever. if self.next_block_root.is_zero() { @@ -282,7 +286,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> let block = if self.decode_any_variant { self.store.get_block_any_variant(&block_root) } else { - self.store.get_block(&block_root) + self.store.get_blinded_block(&block_root) }? .ok_or(Error::BlockNotFound(block_root))?; self.next_block_root = block.message().parent_root(); @@ -294,7 +298,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator for ParentRootBlockIterator<'a, E, Hot, Cold> { - type Item = Result<(Hash256, SignedBeaconBlock), Error>; + type Item = Result<(Hash256, SignedBeaconBlock>), Error>; fn next(&mut self) -> Option { self.do_next().transpose() @@ -322,10 +326,10 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockIterator<'a, T, } } - fn do_next(&mut self) -> Result>, Error> { + fn do_next(&mut self) -> Result>>, Error> { if let Some(result) = self.roots.next() { let (root, _slot) = result?; - self.roots.inner.store.get_block(&root) + self.roots.inner.store.get_blinded_block(&root) } else { Ok(None) } @@ -335,7 +339,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockIterator<'a, T, impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator for BlockIterator<'a, T, Hot, Cold> { - type Item = Result, Error>; + type Item = Result>, Error>; fn next(&mut self) -> Option { self.do_next().transpose() diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index f05fc7836a..55ab5179b1 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -178,10 +178,7 @@ impl 
KeyValueStore for LevelDB { } /// Iterate through all keys and values in a particular column. - fn iter_column<'a>( - &'a self, - column: DBColumn, - ) -> Box), Error>> + 'a> { + fn iter_column(&self, column: DBColumn) -> ColumnIter { let start_key = BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes())); @@ -201,6 +198,28 @@ impl KeyValueStore for LevelDB { }), ) } + + /// Iterate through all keys and values in a particular column. + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + let start_key = + BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes())); + + let iter = self.db.keys_iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |key| key.matches_column(column)) + .map(move |bytes_key| { + let key = + bytes_key + .remove_column(column) + .ok_or(HotColdDBError::IterationError { + unexpected_key: bytes_key, + })?; + Ok(key) + }), + ) + } } impl ItemStore for LevelDB {} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 8468ab5459..75ba1fcf0d 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -45,6 +45,7 @@ use strum::{EnumString, IntoStaticStr}; pub use types::*; pub type ColumnIter<'a> = Box), Error>> + 'a>; +pub type ColumnKeyIter<'a> = Box> + 'a>; pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Retrieve some bytes in `column` with `key`. @@ -79,11 +80,17 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Compact the database, freeing space used by deleted items. fn compact(&self) -> Result<(), Error>; - /// Iterate through all values in a particular column. + /// Iterate through all keys and values in a particular column. fn iter_column(&self, _column: DBColumn) -> ColumnIter { // Default impl for non LevelDB databases Box::new(std::iter::empty()) } + + /// Iterate through all keys in a particular column. 
+ fn iter_column_keys(&self, _column: DBColumn) -> ColumnKeyIter { + // Default impl for non LevelDB databases + Box::new(std::iter::empty()) + } } pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec { @@ -153,6 +160,7 @@ pub enum StoreOp<'a, E: EthSpec> { DeleteStateTemporaryFlag(Hash256), DeleteBlock(Hash256), DeleteState(Hash256, Option), + DeleteExecutionPayload(Hash256), KeyValueOp(KeyValueStoreOp), } @@ -177,6 +185,9 @@ pub enum DBColumn { /// and then made non-temporary by the deletion of their state root from this column. #[strum(serialize = "bst")] BeaconStateTemporary, + /// Execution payloads for blocks more recent than the finalized checkpoint. + #[strum(serialize = "exp")] + ExecPayload, /// For persisting in-memory state to the database. #[strum(serialize = "bch")] BeaconChain, @@ -203,6 +214,12 @@ pub enum DBColumn { DhtEnrs, } +/// A block from the database, which might have an execution payload or not. +pub enum DatabaseBlock { + Full(SignedBeaconBlock), + Blinded(SignedBeaconBlock>), +} + impl DBColumn { pub fn as_str(self) -> &'static str { self.into() diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index bae051780d..c939fd3f51 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -77,7 +77,7 @@ where None } else { Some( - self.get_block(&block_root)? + self.get_blinded_block(&block_root)? .ok_or(Error::BlockNotFound(block_root))?, ) }; diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index d0f449bab8..9c6bf1ca87 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -3,7 +3,7 @@ //! This service allows task execution on the beacon node for various functionality. 
use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::info; +use slog::{debug, info, warn}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::Duration; @@ -24,10 +24,30 @@ pub fn spawn_timer( // Warning: `interval_at` panics if `seconds_per_slot` = 0. let mut interval = interval_at(start_instant, Duration::from_secs(seconds_per_slot)); + let per_slot_executor = executor.clone(); let timer_future = async move { + let log = per_slot_executor.log().clone(); loop { interval.tick().await; - beacon_chain.per_slot_task(); + let chain = beacon_chain.clone(); + if let Some(handle) = per_slot_executor + .spawn_blocking_handle(move || chain.per_slot_task(), "timer_per_slot_task") + { + if let Err(e) = handle.await { + warn!( + log, + "Per slot task failed"; + "info" => ?e + ); + } + } else { + debug!( + log, + "Per slot task timer stopped"; + "info" => "shutting down" + ); + break; + } } }; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 22d279d8b7..871b2c4ba8 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -38,6 +38,7 @@ * [Validator Graffiti](./graffiti.md) * [Remote Signing with Web3Signer](./validator-web3signer.md) * [Database Configuration](./advanced_database.md) + * [Database Migrations](./database-migrations.md) * [Advanced Networking](./advanced_networking.md) * [Running a Slasher](./slasher.md) * [Redundancy](./redundancy.md) diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 02a344c74a..178936cf61 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -23,27 +23,39 @@ states to slow down dramatically. A lower _slots per restore point_ value (SPRP) frequent restore points, while a higher SPRP corresponds to less frequent. The table below shows some example values. 
-| Use Case | SPRP | Yearly Disk Usage | Load Historical State | -| ---------------------- | -------------- | ----------------- | --------------------- | -| Block explorer/analysis | 32 | 1.4 TB | 155 ms | -| Default | 2048 | 23.1 GB | 10.2 s | -| Validator only | 8192 | 5.7 GB | 41 s | +| Use Case | SPRP | Yearly Disk Usage | Load Historical State | +| ---------------------- | -------------- | ----------------- | --------------------- | +| Block explorer/analysis | 32 | 1.4 TB | 155 ms | +| Hobbyist (prev. default) | 2048 | 23.1 GB | 10.2 s | +| Validator only (default) | 8192 | 5.7 GB | 41 s | As you can see, it's a high-stakes trade-off! The relationships to disk usage and historical state load time are both linear – doubling SPRP halves disk usage and doubles load time. The minimum SPRP is 32, and the maximum is 8192. +The default value is 8192 for databases synced from scratch using Lighthouse v2.2.0 or later, or +2048 for prior versions. Please see the section on [Defaults](#defaults) below. + The values shown in the table are approximate, calculated using a simple heuristic: each `BeaconState` consumes around 18MB of disk space, and each block replayed takes around 5ms. The **Yearly Disk Usage** column shows the approx size of the freezer DB _alone_ (hot DB not included), and the **Load Historical State** time is the worst-case load time for a state in the last slot before a restore point. +### Defaults + +As of Lighthouse v2.2.0, the default slots-per-restore-point value has been increased from 2048 +to 8192 in order to conserve disk space. Existing nodes will continue to use SPRP=2048 unless +re-synced. Note that it is currently not possible to change the SPRP without re-syncing, although +fast re-syncing may be achieved with [Checkpoint Sync](./checkpoint-sync.md). 
+ +### CLI Configuration + To configure your Lighthouse node's database with a non-default SPRP, run your Beacon Node with the `--slots-per-restore-point` flag: ```bash -lighthouse beacon_node --slots-per-restore-point 8192 +lighthouse beacon_node --slots-per-restore-point 32 ``` ## Glossary diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index c79ddab01f..71155a1c23 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -20,11 +20,11 @@ sync. Having a large peer count means that your node must act as an honest RPC server to all your connected peers. If there are many that are syncing, they will -often be requesting a large number of blocks from your node. This means you -node must perform a lot of work reading and responding to these peers. If you +often be requesting a large number of blocks from your node. This means your +node must perform a lot of work reading and responding to these peers. If your node is over-loaded with peers and cannot respond in time, other Lighthouse peers will consider you non-performant and disfavour you from their peer -stores. You node will also have to handle and manage the gossip and extra +stores. Your node will also have to handle and manage the gossip and extra bandwidth that comes from having these extra peers. Having a non-responsive node (due to overloading of connected peers), degrades the network as a whole. @@ -66,7 +66,7 @@ these flags incorrectly can lead to your node being incorrectly added to the global DHT which will degrades the discovery process for all Ethereum consensus peers. The ENR of a Lighthouse node is initially set to be non-contactable. The -in-built discovery mechanism can determine if you node is publicly accessible, +in-built discovery mechanism can determine if your node is publicly accessible, and if it is, it will update your ENR to the correct public IP and port address (meaning you do not need to set it manually). 
Lighthouse persists its ENR, so on reboot it will re-load the settings it had discovered previously. diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index ea282cf2bc..f5c4542b9e 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -366,6 +366,12 @@ curl "http://localhost:5052/lighthouse/database/info" | jq ```json { "schema_version": 5, + "config": { + "slots_per_restore_point": 2048, + "block_cache_size": 5, + "compact_on_init": false, + "compact_on_prune": true + }, "split": { "slot": "2034912", "state_root": "0x11c8516aa7d4d1613e84121e3a557ceca34618b4c1a38f05b66ad045ff82b33b" diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md new file mode 100644 index 0000000000..ce7ff21328 --- /dev/null +++ b/book/src/database-migrations.md @@ -0,0 +1,121 @@ +# Database Migrations + +Lighthouse uses a versioned database schema to allow its database design to evolve over time. + +Since beacon chain genesis in December 2020 there have been several database upgrades that have +been applied automatically and in a _backwards compatible_ way. + +However, backwards compatibility does not imply the ability to _downgrade_ to a prior version of +Lighthouse after upgrading. To facilitate smooth downgrades, Lighthouse v2.3.0 and above includes a +command for applying database downgrades. + +**Everything on this page applies to the Lighthouse _beacon node_, not to the +validator client or the slasher**. + +## How to apply a database downgrade + +To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters. + +1. Make sure you have a copy of the latest version of Lighthouse. This will be the version that + knows about the latest schema change, and has the ability to revert it. +2. Work out the schema version you would like to downgrade to by checking the Lighthouse release + notes. E.g. 
if you want to downgrade from v2.3.0, which upgraded the version from v8 to v9, then + you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier. +3. **Ensure that downgrading is feasible**. Not all schema upgrades can be reverted, and some of + them are time-sensitive. The release notes will state whether a downgrade is available and + whether any caveats apply to it. +4. Work out the parameters for [Running `lighthouse db` correctly][run-correctly], including your + Lighthouse user, your datadir and your network flag. +5. After stopping the beacon node, run the migrate command with the `--to` parameter set to the + schema version you would like to downgrade to. + +``` +sudo -u "$LH_USER" lighthouse db migrate --to "$VERSION" --datadir "$LH_DATADIR" --network "$NET" +``` + +For example if you want to downgrade to Lighthouse v2.1 or v2.2 from v2.3 and you followed Somer +Esat's guide, you would run: + +``` +sudo -u lighthousebeacon lighthouse db migrate --to 8 --datadir /var/lib/lighthouse --network mainnet +``` + +Where `lighthouse` is Lighthouse v2.3.0+. After the downgrade succeeds you can then replace your +global `lighthouse` binary with the older version and start your node again. + +## How to apply a database upgrade + +Database _upgrades_ happen automatically upon installing a new version of Lighthouse. We will +highlight in the release notes when a database upgrade is included, and make note of the schema +versions involved (e.g. v2.3.0 includes an upgrade from v8 to v9). + +They can also be applied using the `--to` parameter to `lighthouse db migrate`. See the section +on downgrades above. 
+ +## How to check the schema version + +To check the schema version of a running Lighthouse instance you can use the HTTP API: + +```bash +curl "http://localhost:5052/lighthouse/database/info" +``` + +```json +{ + "schema_version": 8, + "config": { + "slots_per_restore_point": 8192, + "slots_per_restore_point_set_explicitly": true, + "block_cache_size": 5, + "compact_on_init": false, + "compact_on_prune": true + } +} +``` + +The `schema_version` key indicates that this database is using schema version 8. + +Alternatively, you can check the schema version with the `lighthouse db` command. + +``` +sudo -u lighthousebeacon lighthouse db version --datadir /var/lib/lighthouse --network mainnet +``` + +See the section on [Running `lighthouse db` correctly][run-correctly] for details. + +## How to run `lighthouse db` correctly + +Several conditions need to be met in order to run `lighthouse db`: + +1. The beacon node must be **stopped** (not running). If you are using systemd a command like + `sudo systemctl stop lighthousebeacon` will accomplish this. +2. The command must run as the user that owns the beacon node database. If you are using systemd then + your beacon node might run as a user called `lighthousebeacon`. +3. The `--datadir` flag must be set to the location of the Lighthouse data directory. +4. The `--network` flag must be set to the correct network, e.g. `mainnet`, `prater` or `ropsten`. 
+ +The general form for a `lighthouse db` command is: + +``` +sudo -u "$LH_USER" lighthouse db version --datadir "$LH_DATADIR" --network "$NET" +``` + +If you followed Somer Esat's guide for mainnet: + +``` +sudo systemctl stop lighthousebeacon +``` +``` +sudo -u lighthousebeacon lighthouse db version --datadir /var/lib/lighthouse --network mainnet +``` + +If you followed the CoinCashew guide for mainnet: + +``` +sudo systemctl stop beacon-chain +``` +``` +lighthouse db version --network mainnet +``` + +[run-correctly]: #how-to-run-lighthouse-db-correctly diff --git a/book/src/validator-web3signer.md b/book/src/validator-web3signer.md index 2de641d48b..103f1ccb3c 100644 --- a/book/src/validator-web3signer.md +++ b/book/src/validator-web3signer.md @@ -43,12 +43,15 @@ remote signer: type: web3signer url: "https://my-remote-signer.com:1234" root_certificate_path: /home/paul/my-certificates/my-remote-signer.pem + client_identity_path: /home/paul/my-keys/my-identity-certificate.p12 + client_identity_password: "password" ``` When using this file, the Lighthouse VC will perform duties for the `0xa5566..` validator and defer to the `https://my-remote-signer.com:1234` server to obtain any signatures. It will load a "self-signed" SSL certificate from `/home/paul/my-certificates/my-remote-signer.pem` (on the -filesystem of the VC) to encrypt the communications between the VC and Web3Signer. +filesystem of the VC) to encrypt the communications between the VC and Web3Signer. It will use +SSL client authentication with the "self-signed" certificate in `/home/paul/my-keys/my-identity-certificate.p12`. > The `request_timeout_ms` key can also be specified. Use this key to override the default timeout > with a new timeout in milliseconds. 
This is the timeout before requests to Web3Signer are diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index b1a2e08d3d..d3a28102f6 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.1.5" +version = "2.2.1" authors = ["Sigma Prime "] edition = "2021" diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index 02333ff599..ccff88ceef 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = "0.7.3" +rand = "0.8.5" eth2_wallet = { path = "../../crypto/eth2_wallet" } eth2_keystore = { path = "../../crypto/eth2_keystore" } filesystem = { path = "../filesystem" } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 4652370c38..3f4831ae17 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -72,6 +72,16 @@ pub enum SigningDefinition { /// The timeout is applied from when the request starts connecting until the response body has finished. #[serde(skip_serializing_if = "Option::is_none")] request_timeout_ms: Option, + + /// Path to a PKCS12 file. + #[serde(skip_serializing_if = "Option::is_none")] + client_identity_path: Option, + + /// Password for the PKCS12 file. + /// + /// An empty password will be used if this is omitted. 
+ #[serde(skip_serializing_if = "Option::is_none")] + client_identity_password: Option, }, } diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index d1eae97ce1..7c3d183940 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -16,4 +16,4 @@ hex = "0.4.2" types = { path = "../../consensus/types"} eth2_ssz = "0.4.1" tree_hash = "0.4.1" -ethabi = "12.0.0" +ethabi = "16.0.0" diff --git a/common/deposit_contract/src/lib.rs b/common/deposit_contract/src/lib.rs index 92ccee3bef..2a9f985d5f 100644 --- a/common/deposit_contract/src/lib.rs +++ b/common/deposit_contract/src/lib.rs @@ -70,13 +70,13 @@ pub fn decode_eth1_tx_data( }; } - let root = decode_token!(Hash256, to_fixed_bytes); + let root = decode_token!(Hash256, into_fixed_bytes); let deposit_data = DepositData { amount, - signature: decode_token!(SignatureBytes, to_bytes), - withdrawal_credentials: decode_token!(Hash256, to_bytes), - pubkey: decode_token!(PublicKeyBytes, to_bytes), + signature: decode_token!(SignatureBytes, into_bytes), + withdrawal_credentials: decode_token!(Hash256, into_bytes), + pubkey: decode_token!(PublicKeyBytes, into_bytes), }; Ok((deposit_data, root)) diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index fecebe8cad..294f8ec8a3 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -15,7 +15,7 @@ lighthouse_network = { path = "../../beacon_node/lighthouse_network" } proto_array = { path = "../../consensus/proto_array", optional = true } eth2_serde_utils = "0.1.1" eth2_keystore = { path = "../../crypto/eth2_keystore" } -libsecp256k1 = "0.6.0" +libsecp256k1 = "0.7.0" ring = "0.16.19" bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } @@ -26,12 +26,10 @@ futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } slashing_protection = { path = "../../validator_client/slashing_protection", optional = true } +mime = "0.3.16" 
[target.'cfg(target_os = "linux")'.dependencies] -# TODO: update psutil once fix is merged: https://github.com/rust-psutil/rust-psutil/pull/93 -# TODO: Even once the above PR is corrected, there are sub-dependencies that need to be updated. -# psutil = { version = "3.2.0", optional = true } -psutil = { git = "https://github.com/sigp/rust-psutil", rev = "b3e44bc7ec5d545b8cb8ad4e3dffe074b6e6336b", optional = true } +psutil = { version = "3.2.2", optional = true } procinfo = { version = "0.4.2", optional = true } [features] diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 856097cfe1..3e965a2bf8 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -579,9 +579,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blocks( + pub async fn post_beacon_blocks>( &self, - block: &SignedBeaconBlock, + block: &SignedBeaconBlock, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -596,6 +596,26 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST beacon/blinded_blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_blinded_blocks>( + &self, + block: &SignedBeaconBlock, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("blinded_blocks"); + + self.post_with_timeout(path, block, self.timeouts.proposal) + .await?; + + Ok(()) + } + /// Path for `v2/beacon/blocks` pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result { let mut path = self.eth_path(V2)?; @@ -1150,24 +1170,24 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks( + pub async fn get_validator_blocks>( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blocks_with_verify_randao(slot, Some(randao_reveal), graffiti, None) .await } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks_with_verify_randao( + pub async fn get_validator_blocks_with_verify_randao>( &self, slot: Slot, randao_reveal: Option<&SignatureBytes>, graffiti: Option<&Graffiti>, verify_randao: Option, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V2)?; path.path_segments_mut() @@ -1194,6 +1214,59 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `GET v2/validator/blinded_blocks/{slot}` + pub async fn get_validator_blinded_blocks>( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + ) -> Result>, Error> { + self.get_validator_blinded_blocks_with_verify_randao( + slot, + Some(randao_reveal), + graffiti, + None, + ) + .await + } + + /// `GET v2/validator/blocks/{slot}` + pub async fn get_validator_blinded_blocks_with_verify_randao< + T: EthSpec, + Payload: ExecPayload, + >( + &self, + slot: Slot, + randao_reveal: Option<&SignatureBytes>, + graffiti: Option<&Graffiti>, + verify_randao: Option, + ) -> Result>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("blinded_blocks") + .push(&slot.to_string()); + + if let Some(randao_reveal) = randao_reveal { + path.query_pairs_mut() + .append_pair("randao_reveal", &randao_reveal.to_string()); + } + + if let Some(graffiti) = graffiti { + path.query_pairs_mut() + .append_pair("graffiti", &graffiti.to_string()); + } + + if let Some(verify_randao) = verify_randao { + path.query_pairs_mut() + .append_pair("verify_randao", &verify_randao.to_string()); + } + + self.get(path).await + } + /// `GET validator/attestation_data?slot,committee_index` pub async fn get_validator_attestation_data( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index a2e4a66c4b..91e6a5558b 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -14,7 +14,7 @@ use reqwest::IntoUrl; use serde::{Deserialize, Serialize}; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; -use store::{AnchorInfo, Split}; +use store::{AnchorInfo, Split, StoreConfig}; pub use attestation_performance::{ AttestationPerformance, AttestationPerformanceQuery, AttestationPerformanceStatistics, @@ -334,6 +334,7 @@ impl Eth1Block { #[derive(Debug, Serialize, Deserialize)] pub struct DatabaseInfo { pub schema_version: u64, + pub config: StoreConfig, pub split: Split, pub anchor: Option, } diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index e7c74668e8..5e02ec0bb2 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -476,6 +476,16 @@ impl ValidatorClientHttpClient { Ok(url) } + fn make_remotekeys_url(&self) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("eth") + .push("v1") + .push("remotekeys"); + Ok(url) + } + /// `GET lighthouse/auth` pub async fn get_auth(&self) -> Result { let mut url = self.server.full.clone(); @@ -509,6 +519,30 @@ impl ValidatorClientHttpClient { let url = self.make_keystores_url()?; self.delete_with_unsigned_response(url, req).await } + + /// `GET eth/v1/remotekeys` + pub async fn get_remotekeys(&self) -> Result { + let url = self.make_remotekeys_url()?; + self.get_unsigned(url).await + } + + /// `POST eth/v1/remotekeys` + pub async fn post_remotekeys( + &self, + req: &ImportRemotekeysRequest, + ) -> Result { + let url = self.make_remotekeys_url()?; + self.post_with_unsigned_response(url, req).await + } + + /// `DELETE eth/v1/remotekeys` + pub async fn delete_remotekeys( + &self, + req: &DeleteRemotekeysRequest, + ) -> Result { + let url = self.make_remotekeys_url()?; + self.delete_with_unsigned_response(url, req).await + } } /// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index ebcce3fab0..d9fe969138 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -102,3 +102,59 @@ pub enum DeleteKeystoreStatus { NotFound, Error, } + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct ListRemotekeysResponse { + pub data: Vec, +} + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct SingleListRemotekeysResponse { + pub pubkey: PublicKeyBytes, + pub url: String, + pub readonly: bool, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct ImportRemotekeysRequest { + pub remote_keys: Vec, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] +pub struct SingleImportRemotekeysRequest { + pub pubkey: PublicKeyBytes, + pub url: String, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all 
= "lowercase")] +pub enum ImportRemotekeyStatus { + Imported, + Duplicate, + Error, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct ImportRemotekeysResponse { + pub data: Vec>, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct DeleteRemotekeysRequest { + pub pubkeys: Vec, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum DeleteRemotekeyStatus { + Deleted, + NotFound, + Error, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct DeleteRemotekeysResponse { + pub data: Vec>, +} diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 9bf7546749..fe9b6a48c0 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -92,4 +92,8 @@ pub struct Web3SignerValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub request_timeout_ms: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_path: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_password: Option, } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 8cd3a1d67f..8ef3582268 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -3,7 +3,9 @@ use crate::Error as ServerError; use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStatus}; +use mime::{Mime, APPLICATION, JSON, OCTET_STREAM, STAR}; use serde::{Deserialize, Serialize}; +use std::cmp::Reverse; use std::convert::TryFrom; use std::fmt; use std::str::{from_utf8, FromStr}; @@ -1008,15 +1010,37 @@ impl FromStr for Accept { type Err = String; fn from_str(s: &str) -> Result { - match s { - "application/octet-stream" => Ok(Accept::Ssz), - "application/json" => Ok(Accept::Json), - "*/*" => Ok(Accept::Any), - _ => Err("accept header cannot be parsed.".to_string()), - } + let mut mimes = parse_accept(s)?; + + // 
[q-factor weighting]: https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.2 + // find the highest q-factor supported accept type + mimes.sort_by_key(|m| { + Reverse(m.get_param("q").map_or(1000_u16, |n| { + (n.as_ref().parse::().unwrap_or(0_f32) * 1000_f32) as u16 + })) + }); + mimes + .into_iter() + .find_map(|m| match (m.type_(), m.subtype()) { + (APPLICATION, OCTET_STREAM) => Some(Accept::Ssz), + (APPLICATION, JSON) => Some(Accept::Json), + (STAR, STAR) => Some(Accept::Any), + _ => None, + }) + .ok_or_else(|| "accept header is not supported".to_string()) } } +fn parse_accept(accept: &str) -> Result, String> { + accept + .split(',') + .map(|part| { + part.parse() + .map_err(|e| format!("error parsing Accept header: {}", e)) + }) + .collect() +} + #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, @@ -1045,4 +1069,23 @@ mod tests { } ); } + + #[test] + fn parse_accept_header_content() { + assert_eq!( + Accept::from_str("application/json; charset=utf-8").unwrap(), + Accept::Json + ); + + assert_eq!( + Accept::from_str("text/plain,application/octet-stream;q=0.3,application/json;q=0.9") + .unwrap(), + Accept::Json + ); + + assert_eq!( + Accept::from_str("text/plain"), + Err("accept header is not supported".to_string()) + ) + } } diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 7992955dc4..ec8522ac98 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -237,5 +237,6 @@ define_hardcoded_nets!( (mainnet, "mainnet", GENESIS_STATE_IS_KNOWN), (prater, "prater", GENESIS_STATE_IS_KNOWN), (gnosis, "gnosis", GENESIS_STATE_IS_KNOWN), - (kiln, "kiln", GENESIS_STATE_IS_KNOWN) + (kiln, "kiln", GENESIS_STATE_IS_KNOWN), + (ropsten, "ropsten", GENESIS_STATE_IS_KNOWN) ); diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 2e2d781288..5f577bedc3 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ 
b/common/eth2_interop_keypairs/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] lazy_static = "1.4.0" num-bigint = "0.4.2" -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" hex = "0.4.2" serde_yaml = "0.8.13" serde = "1.0.116" diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 12d7995285..7987899c3d 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -74,9 +74,8 @@ CHURN_LIMIT_QUOTIENT: 4096 # Fork choice # --------------------------------------------------------------- -# TODO: enable once proposer boosting is desired on mainnet -# 70% -# PROPOSER_SCORE_BOOST: 70 +# 40% +PROPOSER_SCORE_BOOST: 40 # Deposit contract # --------------------------------------------------------------- diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 6993c24b8e..cc4e7dcab4 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -74,9 +74,8 @@ CHURN_LIMIT_QUOTIENT: 65536 # Fork choice # --------------------------------------------------------------- -# TODO: enable once proposer boosting is desired on mainnet -# 70% -# PROPOSER_SCORE_BOOST: 70 +# 40% +PROPOSER_SCORE_BOOST: 40 # Deposit contract # --------------------------------------------------------------- diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index 106c95595e..d337c4120a 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -74,8 +74,8 @@ CHURN_LIMIT_QUOTIENT: 
65536 # Fork choice # --------------------------------------------------------------- -# 70% -PROPOSER_SCORE_BOOST: 70 +# 40% +PROPOSER_SCORE_BOOST: 40 # Deposit contract # --------------------------------------------------------------- diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/ropsten/boot_enr.yaml new file mode 100644 index 0000000000..27e6e53fc4 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/ropsten/boot_enr.yaml @@ -0,0 +1,4 @@ +# Pari +- enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk +# Teku +- enr:-KG4QMJSJ7DHk6v2p-W8zQ3Xv7FfssZ_1E3p2eY6kN13staMObUonAurqyWhODoeY6edXtV8e9eL9RnhgZ9va2SMDRQMhGV0aDKQS-iVMYAAAHD0AQAAAAAAAIJpZIJ2NIJpcIQDhAAhiXNlY3AyNTZrMaEDXBVUZhhmdy1MYor1eGdRJ4vHYghFKDgjyHgt6sJ-IlCDdGNwgiMog3VkcIIjKA diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml new file mode 100644 index 0000000000..45921aec53 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml @@ -0,0 +1,71 @@ +# Extends the mainnet preset +PRESET_BASE: 'mainnet' +CONFIG_NAME: 'ropsten' + +# Genesis +# --------------------------------------------------------------- +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 100000 +# Monday, May 30th, 2022 3:00:00 PM +UTC +MIN_GENESIS_TIME: 1653318000 +GENESIS_FORK_VERSION: 0x80000069 +GENESIS_DELAY: 604800 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x80000070 +ALTAIR_FORK_EPOCH: 500 +# Merge +BELLATRIX_FORK_VERSION: 0x80000071 
+BELLATRIX_FORK_EPOCH: 750 +TERMINAL_TOTAL_DIFFICULTY: 43531756765713534 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Sharding +SHARDING_FORK_VERSION: 0x03001020 +SHARDING_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 + + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 3 +DEPOSIT_NETWORK_ID: 3 +DEPOSIT_CONTRACT_ADDRESS: 0x6f22fFbC56eFF051aECF839396DD1eD9aD6BBA9D diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/ropsten/deploy_block.txt new file mode 100644 index 0000000000..dd46f23b62 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/ropsten/deploy_block.txt @@ -0,0 +1 @@ +12269949 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/ropsten/genesis.ssz.zip new file mode 100644 index 0000000000..5f83ed3b65 Binary files /dev/null and 
b/common/eth2_network_config/built_in_network_configs/ropsten/genesis.ssz.zip differ diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 80bf56a725..b50079f195 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -4,6 +4,7 @@ use target_info::Target; /// Returns the current version of this build of Lighthouse. /// /// A plus-sign (`+`) is appended to the git commit if the tree is dirty. +/// Commit hash is omitted if the sources don't include git information. /// /// ## Example /// @@ -16,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.1.5-", - fallback = "unknown" + prefix = "Lighthouse/v2.2.1-", + fallback = "Lighthouse/v2.2.1" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/common/lru_cache/src/lib.rs b/common/lru_cache/src/lib.rs index 51df38bcfe..6eecb58c19 100644 --- a/common/lru_cache/src/lib.rs +++ b/common/lru_cache/src/lib.rs @@ -1,7 +1,5 @@ -//! A library to provide fast and efficient LRU Cache's without updating. +//! A library to provide fast and efficient LRU Cache's. -mod space; mod time; -pub use space::LRUCache; pub use time::LRUTimeCache; diff --git a/common/lru_cache/src/space.rs b/common/lru_cache/src/space.rs deleted file mode 100644 index db588632a9..0000000000 --- a/common/lru_cache/src/space.rs +++ /dev/null @@ -1,93 +0,0 @@ -///! This implements a time-based LRU cache for fast checking of duplicates -use fnv::FnvHashSet; -use std::collections::VecDeque; - -/// Cache that stores keys until the size is used up. Does not update elements for efficiency. -pub struct LRUCache -where - Key: Eq + std::hash::Hash + Clone, -{ - /// The duplicate cache. - map: FnvHashSet, - /// An ordered list of keys by order. 
- list: VecDeque, - // The max size of the cache, - size: usize, -} - -impl LRUCache -where - Key: Eq + std::hash::Hash + Clone, -{ - pub fn new(size: usize) -> Self { - LRUCache { - map: FnvHashSet::default(), - list: VecDeque::new(), - size, - } - } - - /// Determines if the key is in the cache. - pub fn contains(&self, key: &Key) -> bool { - self.map.contains(key) - } - - // Inserts new elements and removes any expired elements. - // - // If the key was not present this returns `true`. If the value was already present this - // returns `false`. - pub fn insert(&mut self, key: Key) -> bool { - // check the cache before removing elements - let result = self.map.insert(key.clone()); - - // add the new key to the list, if it doesn't already exist. - if result { - self.list.push_back(key); - } - // remove any overflow keys - self.update(); - result - } - - /// Removes any expired elements from the cache. - fn update(&mut self) { - // remove any expired results - for _ in 0..self.map.len().saturating_sub(self.size) { - if let Some(key) = self.list.pop_front() { - self.map.remove(&key); - } - } - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn cache_added_entries_exist() { - let mut cache = LRUCache::new(5); - - cache.insert("t"); - cache.insert("e"); - - // Should report that 't' and 't' already exists - assert!(!cache.insert("t")); - assert!(!cache.insert("e")); - } - - #[test] - fn cache_entries_get_removed() { - let mut cache = LRUCache::new(2); - - cache.insert("t"); - assert!(!cache.insert("t")); - cache.insert("e"); - assert!(!cache.insert("e")); - // add another element to clear the first key - cache.insert("s"); - assert!(!cache.insert("s")); - // should be removed from the cache - assert!(cache.insert("t")); - } -} diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 30f890a8c6..5c0e4c1ca1 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -31,53 +31,45 @@ where } } - // Inserts new 
elements and removes any expired elements. + // Inserts a new key. It first purges expired elements to do so. // // If the key was not present this returns `true`. If the value was already present this - // returns `false`. - pub fn insert_update(&mut self, key: Key) -> bool { - // check the cache before removing elements - let result = self.map.insert(key.clone()); - - let now = Instant::now(); - - // remove any expired results - while let Some(element) = self.list.pop_front() { - if element.inserted + self.ttl > now { - self.list.push_front(element); - break; - } - self.map.remove(&element.key); - } - - // add the new key to the list, if it doesn't already exist. - if result { - self.list.push_back(Element { key, inserted: now }); - } - - result - } - - // Inserts new element does not expire old elements. - // - // If the key was not present this returns `true`. If the value was already present this - // returns `false`. + // returns `false` and updates the insertion time of the key. pub fn insert(&mut self, key: Key) -> bool { + self.update(); // check the cache before removing elements - let result = self.map.insert(key.clone()); + let is_new = self.map.insert(key.clone()); // add the new key to the list, if it doesn't already exist. - if result { + if is_new { self.list.push_back(Element { key, inserted: Instant::now(), }); + } else { + let position = self + .list + .iter() + .position(|e| e.key == key) + .expect("Key is not new"); + let mut element = self + .list + .remove(position) + .expect("Position is not occupied"); + element.inserted = Instant::now(); + self.list.push_back(element); } - result + #[cfg(test)] + self.check_invariant(); + is_new } /// Removes any expired elements from the cache. 
pub fn update(&mut self) { + if self.list.is_empty() { + return; + } + let now = Instant::now(); // remove any expired results while let Some(element) = self.list.pop_front() { @@ -87,6 +79,46 @@ where } self.map.remove(&element.key); } + #[cfg(test)] + self.check_invariant() + } + + /// Returns if the key is present after removing expired elements. + pub fn contains(&mut self, key: &Key) -> bool { + self.update(); + self.map.contains(key) + } + + #[cfg(test)] + #[track_caller] + fn check_invariant(&self) { + // The list should be sorted. First element should have the oldest insertion + let mut prev_insertion_time = None; + for e in &self.list { + match prev_insertion_time { + Some(prev) => { + if prev <= e.inserted { + prev_insertion_time = Some(e.inserted); + } else { + panic!("List is not sorted by insertion time") + } + } + None => prev_insertion_time = Some(e.inserted), + } + // The key should be in the map + assert!(self.map.contains(&e.key), "List and map should be in sync"); + } + + for k in &self.map { + let _ = self + .list + .iter() + .position(|e| &e.key == k) + .expect("Map and list should be in sync"); + } + + // One last check to make sure there are no duplicates in the list + assert_eq!(self.list.len(), self.map.len()); } } @@ -107,20 +139,22 @@ mod test { } #[test] - fn cache_entries_expire() { + fn test_reinsertion_updates_timeout() { let mut cache = LRUTimeCache::new(Duration::from_millis(100)); - cache.insert_update("t"); - assert!(!cache.insert_update("t")); - cache.insert_update("e"); - assert!(!cache.insert_update("t")); - assert!(!cache.insert_update("e")); - // sleep until cache expiry - std::thread::sleep(Duration::from_millis(101)); - // add another element to clear previous cache - cache.insert_update("s"); + cache.insert("a"); + cache.insert("b"); - // should be removed from the cache - assert!(cache.insert_update("t")); + std::thread::sleep(Duration::from_millis(20)); + cache.insert("a"); + // a is newer now + + 
std::thread::sleep(Duration::from_millis(85)); + assert!(cache.contains(&"a"),); + // b was inserted first but was not as recent it should have been removed + assert!(!cache.contains(&"b")); + + std::thread::sleep(Duration::from_millis(16)); + assert!(!cache.contains(&"a")); } } diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index ed7aeb44a8..e42063d675 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -10,7 +10,7 @@ edition = "2021" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" libc = "0.2.79" -parking_lot = "0.11.0" +parking_lot = "0.12.0" jemallocator = { version = "0.3.0", optional = true, features = ["background_threads"] } jemalloc-sys = { version = "0.3.0", optional = true } diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index eaf280398f..d9fdd73126 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -8,4 +8,4 @@ edition = "2021" types = { path = "../../consensus/types" } lazy_static = "1.4.0" lighthouse_metrics = { path = "../lighthouse_metrics" } -parking_lot = "0.11.0" +parking_lot = "0.12.0" diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 660cc1ca01..f344dc4735 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,9 +5,10 @@ authors = ["Sigma Prime "] edition = "2021" [dependencies] -tokio = { version = "1.14.0", features = ["rt"] } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } slog = "2.5.2" futures = "0.3.7" exit-future = "0.2.0" lazy_static = "1.4.0" lighthouse_metrics = { path = "../lighthouse_metrics" } +sloggers = { version = "2.1.1", features = ["json"] } diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index 2d3e941a3e..dd525bea50 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -1,10 +1,11 @@ mod metrics; +pub mod test_utils; use 
futures::channel::mpsc::Sender; use futures::prelude::*; use slog::{crit, debug, o, trace}; use std::sync::Weak; -use tokio::runtime::Runtime; +use tokio::runtime::{Handle, Runtime}; /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] @@ -24,11 +25,51 @@ impl ShutdownReason { } } +/// Provides a `Handle` by either: +/// +/// 1. Holding a `Weak` and calling `Runtime::handle`. +/// 2. Directly holding a `Handle` and cloning it. +/// +/// This enum allows the `TaskExecutor` to work in production where a `Weak` is directly +/// accessible and in testing where the `Runtime` is hidden outside our scope. +#[derive(Clone)] +pub enum HandleProvider { + Runtime(Weak), + Handle(Handle), +} + +impl From for HandleProvider { + fn from(handle: Handle) -> Self { + HandleProvider::Handle(handle) + } +} + +impl From> for HandleProvider { + fn from(weak_runtime: Weak) -> Self { + HandleProvider::Runtime(weak_runtime) + } +} + +impl HandleProvider { + /// Returns a `Handle` to a `Runtime`. + /// + /// May return `None` if the weak reference to the `Runtime` has been dropped (this generally + /// means Lighthouse is shutting down). + pub fn handle(&self) -> Option { + match self { + HandleProvider::Runtime(weak_runtime) => weak_runtime + .upgrade() + .map(|runtime| runtime.handle().clone()), + HandleProvider::Handle(handle) => Some(handle.clone()), + } + } +} + /// A wrapper over a runtime handle which can spawn async and blocking tasks. #[derive(Clone)] pub struct TaskExecutor { /// The handle to the runtime on which tasks are spawned - runtime: Weak, + handle_provider: HandleProvider, /// The receiver exit future which on receiving shuts down the task exit: exit_future::Exit, /// Sender given to tasks, so that if they encounter a state in which execution cannot @@ -43,16 +84,19 @@ pub struct TaskExecutor { impl TaskExecutor { /// Create a new task executor. /// - /// Note: this function is mainly useful in tests. 
A `TaskExecutor` should be normally obtained from - /// a [`RuntimeContext`](struct.RuntimeContext.html) - pub fn new( - runtime: Weak, + /// ## Note + /// + /// This function should only be used during testing. In production, prefer to obtain an + /// instance of `Self` via a `environment::RuntimeContext` (see the `lighthouse/environment` + /// crate). + pub fn new>( + handle: T, exit: exit_future::Exit, log: slog::Logger, signal_tx: Sender, ) -> Self { Self { - runtime, + handle_provider: handle.into(), exit, signal_tx, log, @@ -62,7 +106,7 @@ impl TaskExecutor { /// Clones the task executor adding a service name. pub fn clone_with_name(&self, service_name: String) -> Self { TaskExecutor { - runtime: self.runtime.clone(), + handle_provider: self.handle_provider.clone(), exit: self.exit.clone(), signal_tx: self.signal_tx.clone(), log: self.log.new(o!("service" => service_name)), @@ -94,8 +138,8 @@ impl TaskExecutor { let mut shutdown_sender = self.shutdown_sender(); let log = self.log.clone(); - if let Some(runtime) = self.runtime.upgrade() { - runtime.spawn(async move { + if let Some(handle) = self.handle() { + handle.spawn(async move { let timer = metrics::start_timer_vec(&metrics::TASKS_HISTOGRAM, &[name]); if let Err(join_error) = task_handle.await { if let Ok(panic) = join_error.try_into_panic() { @@ -160,8 +204,8 @@ impl TaskExecutor { }); int_gauge.inc(); - if let Some(runtime) = self.runtime.upgrade() { - runtime.spawn(future); + if let Some(handle) = self.handle() { + handle.spawn(future); } else { debug!(self.log, "Couldn't spawn task. Runtime shutting down"); } @@ -211,8 +255,8 @@ impl TaskExecutor { }); int_gauge.inc(); - if let Some(runtime) = self.runtime.upgrade() { - Some(runtime.spawn(future)) + if let Some(handle) = self.handle() { + Some(handle.spawn(future)) } else { debug!(self.log, "Couldn't spawn task. 
Runtime shutting down"); None @@ -242,8 +286,8 @@ impl TaskExecutor { let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]); metrics::inc_gauge_vec(&metrics::BLOCKING_TASKS_COUNT, &[name]); - let join_handle = if let Some(runtime) = self.runtime.upgrade() { - runtime.spawn_blocking(task) + let join_handle = if let Some(handle) = self.handle() { + handle.spawn_blocking(task) } else { debug!(self.log, "Couldn't spawn task. Runtime shutting down"); return None; @@ -268,8 +312,9 @@ impl TaskExecutor { Some(future) } - pub fn runtime(&self) -> Weak { - self.runtime.clone() + /// Returns a `Handle` to the current runtime. + pub fn handle(&self) -> Option { + self.handle_provider.handle() } /// Returns a copy of the `exit_future::Exit`. diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs new file mode 100644 index 0000000000..7d59cdf022 --- /dev/null +++ b/common/task_executor/src/test_utils.rs @@ -0,0 +1,68 @@ +use crate::TaskExecutor; +use slog::Logger; +use sloggers::{null::NullLoggerBuilder, Build}; +use std::sync::Arc; +use tokio::runtime; + +/// Whilst the `TestRuntime` is not necessarily useful in itself, it provides the necessary +/// components for creating a `TaskExecutor` during tests. +/// +/// May create its own runtime or use an existing one. +/// +/// ## Warning +/// +/// This struct should never be used in production, only testing. +pub struct TestRuntime { + runtime: Option>, + _runtime_shutdown: exit_future::Signal, + pub task_executor: TaskExecutor, + pub log: Logger, +} + +impl Default for TestRuntime { + /// If called *inside* an existing runtime, instantiates `Self` using a handle to that runtime. If + /// called *outside* any existing runtime, create a new `Runtime` and keep it alive until the + /// `Self` is dropped. 
+ fn default() -> Self { + let (runtime_shutdown, exit) = exit_future::signal(); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let log = null_logger().unwrap(); + + let (runtime, handle) = if let Ok(handle) = runtime::Handle::try_current() { + (None, handle) + } else { + let runtime = Arc::new( + runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(), + ); + let handle = runtime.handle().clone(); + (Some(runtime), handle) + }; + + let task_executor = TaskExecutor::new(handle, exit, log.clone(), shutdown_tx); + + Self { + runtime, + _runtime_shutdown: runtime_shutdown, + task_executor, + log, + } + } +} + +impl Drop for TestRuntime { + fn drop(&mut self) { + if let Some(runtime) = self.runtime.take() { + Arc::try_unwrap(runtime).unwrap().shutdown_background() + } + } +} + +pub fn null_logger() -> Result { + let log_builder = NullLoggerBuilder; + log_builder + .build() + .map_err(|e| format!("Failed to start null logger: {:?}", e)) +} diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 6fd4730f4b..0eba4cf232 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -14,7 +14,7 @@ bls = { path = "../../crypto/bls" } eth2_keystore = { path = "../../crypto/eth2_keystore" } filesystem = { path = "../filesystem" } types = { path = "../../consensus/types" } -rand = "0.7.3" +rand = "0.8.5" deposit_contract = { path = "../deposit_contract" } tree_hash = "0.4.1" hex = "0.4.2" diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index 95762bbc79..f9433e4a49 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] ethereum-types = "0.12.1" eth2_ssz_types = "0.2.2" -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" eth2_ssz_derive = "0.3.0" eth2_ssz = "0.4.1" tree_hash = "0.4.1" diff --git a/consensus/fork_choice/src/fork_choice.rs 
b/consensus/fork_choice/src/fork_choice.rs index dfa922e5dd..49510e7326 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -6,8 +6,8 @@ use std::marker::PhantomData; use std::time::Duration; use types::{ consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlock, BeaconState, - BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, - IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, + BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, + Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; #[derive(Debug)] @@ -18,6 +18,7 @@ pub enum Error { InvalidProtoArrayBytes(String), InvalidLegacyProtoArrayBytes(String), FailedToProcessInvalidExecutionPayload(String), + FailedToProcessValidExecutionPayload(String), MissingProtoArrayBlock(Hash256), UnknownAncestor { ancestor_slot: Slot, @@ -121,11 +122,22 @@ pub enum PayloadVerificationStatus { /// An EL has declared the execution payload to be valid. Verified, /// An EL has not yet made a determination about the execution payload. - NotVerified, + Optimistic, /// The block is either pre-merge-fork, or prior to the terminal PoW block. Irrelevant, } +impl PayloadVerificationStatus { + /// Returns `true` if the payload was optimistically imported. + pub fn is_optimistic(&self) -> bool { + match self { + PayloadVerificationStatus::Verified => false, + PayloadVerificationStatus::Optimistic => true, + PayloadVerificationStatus::Irrelevant => false, + } + } +} + /// Calculate how far `slot` lies from the start of its epoch. /// /// ## Specification @@ -323,7 +335,7 @@ where } else { // Assume that this payload is valid, since the anchor should be a trusted block and // state. 
- ExecutionStatus::Valid(message.body.execution_payload.block_hash) + ExecutionStatus::Valid(message.body.execution_payload.block_hash()) } }, ); @@ -512,6 +524,16 @@ where Ok(true) } + /// See `ProtoArrayForkChoice::process_execution_payload_validation` for documentation. + pub fn on_valid_execution_payload( + &mut self, + block_root: Hash256, + ) -> Result<(), Error> { + self.proto_array + .process_execution_payload_validation(block_root) + .map_err(Error::FailedToProcessValidExecutionPayload) + } + /// See `ProtoArrayForkChoice::process_execution_payload_invalidation` for documentation. pub fn on_invalid_execution_payload( &mut self, @@ -541,10 +563,10 @@ where /// The supplied block **must** pass the `state_transition` function as it will not be run /// here. #[allow(clippy::too_many_arguments)] - pub fn on_block( + pub fn on_block>( &mut self, current_slot: Slot, - block: &BeaconBlock, + block: &BeaconBlock, block_root: Hash256, block_delay: Duration, state: &BeaconState, @@ -648,7 +670,7 @@ where .map_err(Error::AfterBlockFailed)?; let execution_status = if let Ok(execution_payload) = block.body().execution_payload() { - let block_hash = execution_payload.block_hash; + let block_hash = execution_payload.block_hash(); if block_hash == ExecutionBlockHash::zero() { // The block is post-merge-fork, but pre-terminal-PoW block. We don't need to verify @@ -657,7 +679,9 @@ where } else { match payload_verification_status { PayloadVerificationStatus::Verified => ExecutionStatus::Valid(block_hash), - PayloadVerificationStatus::NotVerified => ExecutionStatus::Unknown(block_hash), + PayloadVerificationStatus::Optimistic => { + ExecutionStatus::Optimistic(block_hash) + } // It would be a logic error to declare a block irrelevant if it has an // execution payload with a non-zero block hash. PayloadVerificationStatus::Irrelevant => { @@ -933,6 +957,15 @@ where } } + /// Returns an `ExecutionStatus` if the block is known **and** a descendant of the finalized root. 
+ pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { + if self.is_descendant_of_finalized(*block_root) { + self.proto_array.get_block_execution_status(block_root) + } else { + None + } + } + /// Returns the `ProtoBlock` for the justified checkpoint. /// /// ## Notes diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 9b85708f34..7826007516 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,4 +1,4 @@ -use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; +use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// @@ -31,9 +31,9 @@ pub trait ForkChoiceStore: Sized { /// Called whenever `ForkChoice::on_block` has verified a block, but not yet added it to fork /// choice. Allows the implementer to performing caching or other housekeeping duties. 
- fn on_verified_block( + fn on_verified_block>( &mut self, - block: &BeaconBlock, + block: &BeaconBlock, block_root: Hash256, state: &BeaconState, ) -> Result<(), Self::Error>; diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index d4a95994e0..157306dd5f 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -6,4 +6,4 @@ pub use crate::fork_choice::{ PayloadVerificationStatus, PersistedForkChoice, QueuedAttestation, }; pub use fork_choice_store::ForkChoiceStore; -pub use proto_array::{Block as ProtoBlock, InvalidationOperation}; +pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 160800ca50..3f8a2ac6b6 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -344,7 +344,7 @@ impl ForkChoiceTest { let state_root = harness .chain .store - .get_block(&fc.fc_store().justified_checkpoint().root) + .get_blinded_block(&fc.fc_store().justified_checkpoint().root) .unwrap() .unwrap() .message() diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 89cd502cf2..7400d4f54d 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] ethereum-types = "0.12.1" -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" lazy_static = "1.4.0" safe_arith = { path = "../safe_arith" } diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 7e1b73bedc..79b4cb2d80 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -44,6 +44,10 @@ pub enum Error { IrrelevantDescendant { block_root: Hash256, }, + ParentExecutionStatusIsInvalid { + block_root: Hash256, + parent_root: Hash256, + }, } #[derive(Clone, PartialEq, Debug)] diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs 
b/consensus/proto_array/src/fork_choice_test_definition.rs index f2b51c1fd4..2980c019e8 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -85,7 +85,7 @@ impl ForkChoiceTestDefinition { self.finalized_checkpoint, junk_shuffling_id.clone(), junk_shuffling_id, - ExecutionStatus::Unknown(ExecutionBlockHash::zero()), + ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), ) .expect("should create fork choice struct"); @@ -189,9 +189,9 @@ impl ForkChoiceTestDefinition { justified_checkpoint, finalized_checkpoint, // All blocks are imported optimistically. - execution_status: ExecutionStatus::Unknown(ExecutionBlockHash::from_root( - root, - )), + execution_status: ExecutionStatus::Optimistic( + ExecutionBlockHash::from_root(root), + ), }; fork_choice.process_block(block).unwrap_or_else(|e| { panic!( diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index fb086a96e9..3f7909553b 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -315,6 +315,21 @@ impl ProtoArray { execution_status: block.execution_status, }; + // If the parent has an invalid execution status, return an error before adding the block to + // `self`. 
+ if let Some(parent_index) = node.parent { + let parent = self + .nodes + .get(parent_index) + .ok_or(Error::InvalidNodeIndex(parent_index))?; + if parent.execution_status.is_invalid() { + return Err(Error::ParentExecutionStatusIsInvalid { + block_root: block.root, + parent_root: parent.root, + }); + } + } + self.indices.insert(node.root, node_index); self.nodes.push(node.clone()); @@ -322,20 +337,37 @@ impl ProtoArray { self.maybe_update_best_child_and_descendant(parent_index, node_index)?; if matches!(block.execution_status, ExecutionStatus::Valid(_)) { - self.propagate_execution_payload_validation(parent_index)?; + self.propagate_execution_payload_validation_by_index(parent_index)?; } } Ok(()) } + /// Updates the `block_root` and all ancestors to have validated execution payloads. + /// + /// Returns an error if: + /// + /// - The `block-root` is unknown. + /// - Any of the to-be-validated payloads are already invalid. + pub fn propagate_execution_payload_validation( + &mut self, + block_root: Hash256, + ) -> Result<(), Error> { + let index = *self + .indices + .get(&block_root) + .ok_or(Error::NodeUnknown(block_root))?; + self.propagate_execution_payload_validation_by_index(index) + } + /// Updates the `verified_node_index` and all ancestors to have validated execution payloads. /// /// Returns an error if: /// /// - The `verified_node_index` is unknown. /// - Any of the to-be-validated payloads are already invalid. - pub fn propagate_execution_payload_validation( + fn propagate_execution_payload_validation_by_index( &mut self, verified_node_index: usize, ) -> Result<(), Error> { @@ -355,7 +387,7 @@ impl ProtoArray { ExecutionStatus::Irrelevant(_) => return Ok(()), // The block has an unknown status, set it to valid since any ancestor of a valid // payload can be considered valid. 
- ExecutionStatus::Unknown(payload_block_hash) => { + ExecutionStatus::Optimistic(payload_block_hash) => { node.execution_status = ExecutionStatus::Valid(payload_block_hash); if let Some(parent_index) = node.parent { parent_index @@ -426,7 +458,7 @@ impl ProtoArray { match node.execution_status { ExecutionStatus::Valid(hash) | ExecutionStatus::Invalid(hash) - | ExecutionStatus::Unknown(hash) => { + | ExecutionStatus::Optimistic(hash) => { // If we're no longer processing the `head_block_root` and the last valid // ancestor is unknown, exit this loop and proceed to invalidate and // descendants of `head_block_root`/`latest_valid_ancestor_root`. @@ -460,7 +492,7 @@ impl ProtoArray { // It might be new knowledge that this block is valid, ensure that it and all // ancestors are marked as valid. - self.propagate_execution_payload_validation(index)?; + self.propagate_execution_payload_validation_by_index(index)?; break; } } @@ -484,7 +516,7 @@ impl ProtoArray { payload_block_hash: *hash, }) } - ExecutionStatus::Unknown(hash) => { + ExecutionStatus::Optimistic(hash) => { invalidated_indices.insert(index); node.execution_status = ExecutionStatus::Invalid(*hash); @@ -548,7 +580,7 @@ impl ProtoArray { payload_block_hash: *hash, }) } - ExecutionStatus::Unknown(hash) | ExecutionStatus::Invalid(hash) => { + ExecutionStatus::Optimistic(hash) | ExecutionStatus::Invalid(hash) => { node.execution_status = ExecutionStatus::Invalid(*hash) } ExecutionStatus::Irrelevant(_) => { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 007f262fdd..88bf7840c2 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,5 +1,5 @@ use crate::error::Error; -use crate::proto_array::{InvalidationOperation, Iter, ProposerBoost, ProtoArray}; +use crate::proto_array::{InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode}; use 
crate::ssz_container::SszContainer; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; @@ -28,7 +28,7 @@ pub enum ExecutionStatus { /// An EL has determined that the payload is invalid. Invalid(ExecutionBlockHash), /// An EL has not yet verified the execution payload. - Unknown(ExecutionBlockHash), + Optimistic(ExecutionBlockHash), /// The block is either prior to the merge fork, or after the merge fork but before the terminal /// PoW block has been found. /// @@ -52,30 +52,48 @@ impl ExecutionStatus { match self { ExecutionStatus::Valid(hash) | ExecutionStatus::Invalid(hash) - | ExecutionStatus::Unknown(hash) => Some(*hash), + | ExecutionStatus::Optimistic(hash) => Some(*hash), ExecutionStatus::Irrelevant(_) => None, } } /// Returns `true` if the block: /// - /// - Has execution enabled + /// - Has a valid payload, OR + /// - Does not have execution enabled. + /// + /// Whenever this function returns `true`, the block is *fully valid*. + pub fn is_valid_or_irrelevant(&self) -> bool { + matches!( + self, + ExecutionStatus::Valid(_) | ExecutionStatus::Irrelevant(_) + ) + } + + /// Returns `true` if the block: + /// + /// - Has execution enabled, AND /// - Has a valid payload - pub fn is_valid(&self) -> bool { + /// + /// This function will return `false` for any block from a slot prior to the Bellatrix fork. + /// This means that some blocks that are perfectly valid will still receive a `false` response. + /// See `Self::is_valid_or_irrelevant` for a function that will always return `true` given any + /// perfectly valid block. + pub fn is_valid_and_post_bellatrix(&self) -> bool { matches!(self, ExecutionStatus::Valid(_)) } /// Returns `true` if the block: /// - /// - Has execution enabled + /// - Has execution enabled, AND /// - Has a payload that has not yet been verified by an EL. 
- pub fn is_not_verified(&self) -> bool { - matches!(self, ExecutionStatus::Unknown(_)) + pub fn is_optimistic(&self) -> bool { + matches!(self, ExecutionStatus::Optimistic(_)) } /// Returns `true` if the block: /// - /// - Has execution enabled + /// - Has execution enabled, AND /// - Has an invalid payload. pub fn is_invalid(&self) -> bool { matches!(self, ExecutionStatus::Invalid(_)) @@ -188,6 +206,16 @@ impl ProtoArrayForkChoice { }) } + /// See `ProtoArray::propagate_execution_payload_validation` for documentation. + pub fn process_execution_payload_validation( + &mut self, + block_root: Hash256, + ) -> Result<(), String> { + self.proto_array + .propagate_execution_payload_validation(block_root) + .map_err(|e| format!("Failed to process valid payload: {:?}", e)) + } + /// See `ProtoArray::propagate_execution_payload_invalidation` for documentation. pub fn process_execution_payload_invalidation( &mut self, @@ -284,9 +312,13 @@ impl ProtoArrayForkChoice { self.proto_array.indices.contains_key(block_root) } - pub fn get_block(&self, block_root: &Hash256) -> Option { + fn get_proto_node(&self, block_root: &Hash256) -> Option<&ProtoNode> { let block_index = self.proto_array.indices.get(block_root)?; - let block = self.proto_array.nodes.get(*block_index)?; + self.proto_array.nodes.get(*block_index) + } + + pub fn get_block(&self, block_root: &Hash256) -> Option { + let block = self.get_proto_node(block_root)?; let parent_root = block .parent .and_then(|i| self.proto_array.nodes.get(i)) @@ -315,6 +347,12 @@ impl ProtoArrayForkChoice { } } + /// Returns the `block.execution_status` field, if the block is present. + pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { + let block = self.get_proto_node(block_root)?; + Some(block.execution_status) + } + /// Returns the weight of a given block. 
pub fn get_weight(&self, block_root: &Hash256) -> Option { let block_index = self.proto_array.indices.get(block_root)?; diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index dd41c56fad..51fb749625 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -19,7 +19,7 @@ safe_arith = { path = "../safe_arith" } tree_hash = "0.4.1" types = { path = "../types", default-features = false } rayon = "1.4.1" -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" int_to_bytes = { path = "../int_to_bytes" } smallvec = "1.6.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index d818a4a969..b31e2d6c33 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -4,10 +4,12 @@ use crate::{ VerifyBlockRoot, }; use std::marker::PhantomData; -use types::{BeaconState, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; -type PreBlockHook<'a, E, Error> = - Box, &SignedBeaconBlock) -> Result<(), Error> + 'a>; +type PreBlockHook<'a, E, Error> = Box< + dyn FnMut(&mut BeaconState, &SignedBeaconBlock>) -> Result<(), Error> + + 'a, +>; type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>; type PreSlotHook<'a, E, Error> = Box) -> Result<(), Error> + 'a>; type PostSlotHook<'a, E, Error> = Box< @@ -155,7 +157,7 @@ where fn get_state_root( &mut self, slot: Slot, - blocks: &[SignedBeaconBlock], + blocks: &[SignedBeaconBlock>], i: usize, ) -> Result, Error> { // If a state root iterator is configured, use it to find the root. @@ -189,7 +191,7 @@ where /// after the blocks have been applied. 
pub fn apply_blocks( mut self, - blocks: Vec>, + blocks: Vec>>, target_slot: Option, ) -> Result { for (i, block) in blocks.iter().enumerate() { diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index 0a12226da9..fdd3f95a65 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,6 +1,9 @@ use std::marker::PhantomData; use tree_hash::TreeHash; -use types::{BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock, + Slot, +}; #[derive(Debug)] pub struct ConsensusContext { @@ -61,9 +64,9 @@ impl ConsensusContext { self } - pub fn get_current_block_root( + pub fn get_current_block_root>( &mut self, - block: &SignedBeaconBlock, + block: &SignedBeaconBlock, ) -> Result { self.check_slot(block.slot())?; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index bdad5d93ac..07878110b5 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -88,9 +88,9 @@ pub enum VerifyBlockRoot { /// re-calculating the root when it is already known. Note `block_root` should be equal to the /// tree hash root of the block, NOT the signing root of the block. This function takes /// care of mixing in the domain. 
-pub fn per_block_processing( +pub fn per_block_processing>( state: &mut BeaconState, - signed_block: &SignedBeaconBlock, + signed_block: &SignedBeaconBlock, block_signature_strategy: BlockSignatureStrategy, verify_block_root: VerifyBlockRoot, ctxt: &mut ConsensusContext, @@ -131,7 +131,13 @@ pub fn per_block_processing( BlockSignatureStrategy::VerifyRandao => VerifySignatures::False, }; - let proposer_index = process_block_header(state, block, verify_block_root, ctxt, spec)?; + let proposer_index = process_block_header( + state, + block.temporary_block_header(), + verify_block_root, + ctxt, + spec, + )?; if verify_signatures.is_true() { verify_block_signature(state, signed_block, ctxt, spec)?; @@ -174,28 +180,28 @@ pub fn per_block_processing( /// Processes the block header, returning the proposer index. pub fn process_block_header( state: &mut BeaconState, - block: BeaconBlockRef<'_, T>, + block_header: BeaconBlockHeader, verify_block_root: VerifyBlockRoot, ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result> { // Verify that the slots match verify!( - block.slot() == state.slot(), + block_header.slot == state.slot(), HeaderInvalid::StateSlotMismatch ); // Verify that the block is newer than the latest block header verify!( - block.slot() > state.latest_block_header().slot, + block_header.slot > state.latest_block_header().slot, HeaderInvalid::OlderThanLatestBlockHeader { - block_slot: block.slot(), + block_slot: block_header.slot, latest_block_header_slot: state.latest_block_header().slot, } ); // Verify that proposer index is the correct index - let proposer_index = block.proposer_index(); + let proposer_index = block_header.proposer_index; let state_proposer_index = ctxt.get_proposer_index(state, spec)?; verify!( proposer_index == state_proposer_index, @@ -208,15 +214,15 @@ pub fn process_block_header( if verify_block_root == VerifyBlockRoot::True { let expected_previous_block_root = state.latest_block_header().tree_hash_root(); verify!( - 
block.parent_root() == expected_previous_block_root, + block_header.parent_root == expected_previous_block_root, HeaderInvalid::ParentBlockRootMismatch { state: expected_previous_block_root, - block: block.parent_root(), + block: block_header.parent_root, } ); } - *state.latest_block_header_mut() = block.temporary_block_header(); + *state.latest_block_header_mut() = block_header; // Verify proposer is not slashed verify!( @@ -224,15 +230,15 @@ pub fn process_block_header( HeaderInvalid::ProposerSlashed(proposer_index) ); - Ok(proposer_index) + Ok(proposer_index as u64) } /// Verifies the signature of a block. /// /// Spec v0.12.1 -pub fn verify_block_signature( +pub fn verify_block_signature>( state: &BeaconState, - block: &SignedBeaconBlock, + block: &SignedBeaconBlock, ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockOperationError> { @@ -254,9 +260,9 @@ pub fn verify_block_signature( /// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// `state.latest_randao_mixes`. 
-pub fn process_randao( +pub fn process_randao>( state: &mut BeaconState, - block: BeaconBlockRef<'_, T>, + block: BeaconBlockRef<'_, T, Payload>, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { @@ -318,34 +324,34 @@ pub fn get_new_eth1_data( /// Contains a partial set of checks from the `process_execution_payload` function: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload -pub fn partially_verify_execution_payload( +pub fn partially_verify_execution_payload>( state: &BeaconState, - payload: &ExecutionPayload, + payload: &Payload, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { if is_merge_transition_complete(state) { block_verify!( - payload.parent_hash == state.latest_execution_payload_header()?.block_hash, + payload.parent_hash() == state.latest_execution_payload_header()?.block_hash, BlockProcessingError::ExecutionHashChainIncontiguous { expected: state.latest_execution_payload_header()?.block_hash, - found: payload.parent_hash, + found: payload.parent_hash(), } ); } block_verify!( - payload.prev_randao == *state.get_randao_mix(state.current_epoch())?, + payload.prev_randao() == *state.get_randao_mix(state.current_epoch())?, BlockProcessingError::ExecutionRandaoMismatch { expected: *state.get_randao_mix(state.current_epoch())?, - found: payload.prev_randao, + found: payload.prev_randao(), } ); let timestamp = compute_timestamp_at_slot(state, spec)?; block_verify!( - payload.timestamp == timestamp, + payload.timestamp() == timestamp, BlockProcessingError::ExecutionInvalidTimestamp { expected: timestamp, - found: payload.timestamp, + found: payload.timestamp(), } ); @@ -359,29 +365,14 @@ pub fn partially_verify_execution_payload( /// Partially equivalent to the `process_execution_payload` function: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload -pub fn 
process_execution_payload( +pub fn process_execution_payload>( state: &mut BeaconState, - payload: &ExecutionPayload, + payload: &Payload, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { partially_verify_execution_payload(state, payload, spec)?; - *state.latest_execution_payload_header_mut()? = ExecutionPayloadHeader { - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom.clone(), - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - extra_data: payload.extra_data.clone(), - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - transactions_root: payload.transactions.tree_hash_root(), - }; + *state.latest_execution_payload_header_mut()? = payload.to_execution_payload_header(); Ok(()) } @@ -398,24 +389,21 @@ pub fn is_merge_transition_complete(state: &BeaconState) -> bool .unwrap_or(false) } /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_merge_transition_block -pub fn is_merge_transition_block( +pub fn is_merge_transition_block>( state: &BeaconState, - body: BeaconBlockBodyRef, + body: BeaconBlockBodyRef, ) -> bool { body.execution_payload() - .map(|payload| { - !is_merge_transition_complete(state) && *payload != >::default() - }) + .map(|payload| !is_merge_transition_complete(state) && *payload != Payload::default()) .unwrap_or(false) } /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#is_execution_enabled -pub fn is_execution_enabled( +pub fn is_execution_enabled>( state: &BeaconState, - body: BeaconBlockBodyRef, + body: BeaconBlockBodyRef, ) -> bool { is_merge_transition_block(state, body) || is_merge_transition_complete(state) } - /// 
https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#compute_timestamp_at_slot pub fn compute_timestamp_at_slot( state: &BeaconState, diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 28044a462c..78205ca92c 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -7,7 +7,7 @@ use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256, IndexedAttestation, + BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, IndexedAttestation, SignedBeaconBlock, }; @@ -117,11 +117,11 @@ where /// contains invalid signatures on deposits._ /// /// See `Self::verify` for more detail. - pub fn verify_entire_block( + pub fn verify_entire_block>( state: &'a BeaconState, get_pubkey: F, decompressor: D, - block: &'a SignedBeaconBlock, + block: &'a SignedBeaconBlock, block_root: Option, spec: &'a ChainSpec, ) -> Result<()> { @@ -131,9 +131,9 @@ where } /// Includes all signatures on the block (except the deposit signatures) for verification. - pub fn include_all_signatures( + pub fn include_all_signatures>( &mut self, - block: &'a SignedBeaconBlock, + block: &'a SignedBeaconBlock, block_root: Option, ) -> Result<()> { self.include_block_proposal(block, block_root)?; @@ -144,9 +144,9 @@ where /// Includes all signatures on the block (except the deposit signatures and the proposal /// signature) for verification. 
- pub fn include_all_signatures_except_proposal( + pub fn include_all_signatures_except_proposal>( &mut self, - block: &'a SignedBeaconBlock, + block: &'a SignedBeaconBlock, ) -> Result<()> { self.include_randao_reveal(block)?; self.include_proposer_slashings(block)?; @@ -160,9 +160,9 @@ where } /// Includes the block signature for `self.block` for verification. - pub fn include_block_proposal( + pub fn include_block_proposal>( &mut self, - block: &'a SignedBeaconBlock, + block: &'a SignedBeaconBlock, block_root: Option, ) -> Result<()> { let set = block_proposal_signature_set( @@ -177,7 +177,10 @@ where } /// Includes the randao signature for `self.block` for verification. - pub fn include_randao_reveal(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + pub fn include_randao_reveal>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { let set = randao_signature_set( self.state, self.get_pubkey.clone(), @@ -189,7 +192,10 @@ where } /// Includes all signatures in `self.block.body.proposer_slashings` for verification. - pub fn include_proposer_slashings(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + pub fn include_proposer_slashings>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { self.sets .sets .reserve(block.message().body().proposer_slashings().len() * 2); @@ -215,7 +221,10 @@ where } /// Includes all signatures in `self.block.body.attester_slashings` for verification. - pub fn include_attester_slashings(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + pub fn include_attester_slashings>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { self.sets .sets .reserve(block.message().body().attester_slashings().len() * 2); @@ -241,9 +250,9 @@ where } /// Includes all signatures in `self.block.body.attestations` for verification. 
- pub fn include_attestations( + pub fn include_attestations>( &mut self, - block: &'a SignedBeaconBlock, + block: &'a SignedBeaconBlock, ) -> Result>> { self.sets .sets @@ -280,7 +289,10 @@ where } /// Includes all signatures in `self.block.body.voluntary_exits` for verification. - pub fn include_exits(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + pub fn include_exits>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { self.sets .sets .reserve(block.message().body().voluntary_exits().len()); @@ -301,7 +313,10 @@ where } /// Include the signature of the block's sync aggregate (if it exists) for verification. - pub fn include_sync_aggregate(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + pub fn include_sync_aggregate>( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { if let Ok(sync_aggregate) = block.message().body().sync_aggregate() { if let Some(signature_set) = sync_aggregate_signature_set( &self.decompressor, diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index f830e62541..9339afa0d4 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -9,9 +9,9 @@ use safe_arith::SafeArith; use std::sync::Arc; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; -pub fn process_operations<'a, T: EthSpec>( +pub fn process_operations<'a, T: EthSpec, Payload: ExecPayload>( state: &mut BeaconState, - block_body: BeaconBlockBodyRef<'a, T>, + block_body: BeaconBlockBodyRef<'a, T, Payload>, proposer_index: u64, verify_signatures: VerifySignatures, spec: &ChainSpec, @@ -220,9 +220,9 @@ pub fn process_attester_slashings( } /// Wrapper function to handle calling the correct version of `process_attestations` based on /// the fork. 
-pub fn process_attestations<'a, T: EthSpec>( +pub fn process_attestations<'a, T: EthSpec, Payload: ExecPayload>( state: &mut BeaconState, - block_body: BeaconBlockBodyRef<'a, T>, + block_body: BeaconBlockBodyRef<'a, T, Payload>, proposer_index: u64, verify_signatures: VerifySignatures, spec: &ChainSpec, diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 326bc76803..73cef3a246 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -8,10 +8,11 @@ use std::borrow::Cow; use tree_hash::TreeHash; use types::{ AggregateSignature, AttesterSlashing, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, - DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, IndexedAttestation, - ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedBeaconBlockHeader, SignedContributionAndProof, SignedRoot, - SignedVoluntaryExit, SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, + DepositData, Domain, Epoch, EthSpec, ExecPayload, Fork, Hash256, InconsistentFork, + IndexedAttestation, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, + SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, + SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, + SyncAggregatorSelectionData, Unsigned, }; pub type Result = std::result::Result; @@ -70,10 +71,10 @@ where } /// A signature set that is valid if a block was signed by the expected block producer. 
-pub fn block_proposal_signature_set<'a, T, F>( +pub fn block_proposal_signature_set<'a, T, F, Payload: ExecPayload>( state: &'a BeaconState, get_pubkey: F, - signed_block: &'a SignedBeaconBlock, + signed_block: &'a SignedBeaconBlock, block_root: Option, spec: &'a ChainSpec, ) -> Result> @@ -107,8 +108,8 @@ where /// Unlike `block_proposal_signature_set` this does **not** check that the proposer index is /// correct according to the shuffling. It should only be used if no suitable `BeaconState` is /// available. -pub fn block_proposal_signature_set_from_parts<'a, T, F>( - signed_block: &'a SignedBeaconBlock, +pub fn block_proposal_signature_set_from_parts<'a, T, F, Payload: ExecPayload>( + signed_block: &'a SignedBeaconBlock, block_root: Option, proposer_index: u64, fork: &Fork, @@ -151,10 +152,10 @@ where } /// A signature set that is valid if the block proposers randao reveal signature is correct. -pub fn randao_signature_set<'a, T, F>( +pub fn randao_signature_set<'a, T, F, Payload: ExecPayload>( state: &'a BeaconState, get_pubkey: F, - block: BeaconBlockRef<'a, T>, + block: BeaconBlockRef<'a, T, Payload>, spec: &'a ChainSpec, ) -> Result> where diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index 2d74fcedfc..47e34fd2b6 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -63,7 +63,7 @@ pub fn process_rewards_and_penalties( /// /// Spec v1.1.0 pub fn get_flag_index_deltas( - deltas: &mut Vec, + deltas: &mut [Delta], state: &BeaconState, flag_index: usize, total_active_balance: u64, @@ -112,7 +112,7 @@ pub fn get_flag_weight(flag_index: usize) -> Result { } pub fn get_inactivity_penalty_deltas( - deltas: &mut Vec, + deltas: &mut [Delta], state: &BeaconState, participation_cache: 
&ParticipationCache, spec: &ChainSpec, diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index ada4fba403..1c7ad5f02a 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -12,7 +12,7 @@ harness = false criterion = "0.3.3" [dependencies] -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" ethereum-types = "0.12.1" [features] diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml index bdc7244032..ab080eac06 100644 --- a/consensus/tree_hash/Cargo.toml +++ b/consensus/tree_hash/Cargo.toml @@ -7,7 +7,7 @@ license = "Apache-2.0" description = "Efficient Merkle-hashing as used in Ethereum 2.0" [dev-dependencies] -rand = "0.7.3" +rand = "0.8.5" tree_hash_derive = "0.4.0" types = { path = "../types" } beacon_chain = { path = "../../beacon_node/beacon_chain" } @@ -16,7 +16,7 @@ eth2_ssz_derive = "0.3.0" [dependencies] ethereum-types = "0.12.1" -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" smallvec = "1.6.1" [features] diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index fbed09407e..6a156f9ae0 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -14,12 +14,12 @@ compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum-types = "0.12.1" -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" hex = "0.4.2" int_to_bytes = { path = "../int_to_bytes" } log = "0.4.11" rayon = "1.4.1" -rand = "0.7.3" +rand = "0.8.5" safe_arith = { path = "../safe_arith" } serde = {version = "1.0.116" , features = ["rc"] } serde_derive = "1.0.116" @@ -31,7 +31,7 @@ swap_or_not_shuffle = { path = "../swap_or_not_shuffle" } test_random_derive = { path = "../../common/test_random_derive" } tree_hash = "0.4.1" tree_hash_derive = "0.4.0" -rand_xorshift = "0.2.0" +rand_xorshift = "0.3.0" 
cached_tree_hash = { path = "../cached_tree_hash" } serde_yaml = "0.8.13" tempfile = "3.1.0" @@ -41,9 +41,9 @@ arbitrary = { version = "1.0", features = ["derive"], optional = true } eth2_serde_utils = "0.1.1" regex = "1.5.5" lazy_static = "1.4.0" -parking_lot = "0.11.1" +parking_lot = "0.12.0" itertools = "0.10.0" -superstruct = "0.4.0" +superstruct = "0.5.0" serde_json = "1.0.74" smallvec = "1.8.0" milhouse = { git = "https://github.com/sigp/milhouse", branch = "main" } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index b0c3094e4f..1957c34eaa 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -9,6 +9,7 @@ use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; +use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -29,23 +30,25 @@ use tree_hash_derive::TreeHash; TestRandom, Derivative, ), - derivative(PartialEq, Hash(bound = "T: EthSpec")), - serde(bound = "T: EthSpec", deny_unknown_fields), + derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: ExecPayload")), + serde(bound = "T: EthSpec, Payload: ExecPayload", deny_unknown_fields), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), ), ref_attributes( derive(Debug, PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent") - ) + ), + map_ref_into(BeaconBlockBodyRef), + map_ref_mut_into(BeaconBlockBodyRefMut) )] #[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] -#[serde(bound = "T: EthSpec")] +#[serde(bound = "T: EthSpec, Payload: ExecPayload")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct BeaconBlock { +pub struct BeaconBlock = 
FullPayload> { #[superstruct(getter(copy))] pub slot: Slot, #[superstruct(getter(copy))] @@ -56,17 +59,17 @@ pub struct BeaconBlock { #[superstruct(getter(copy))] pub state_root: Hash256, #[superstruct(only(Base), partial_getter(rename = "body_base"))] - pub body: BeaconBlockBodyBase, + pub body: BeaconBlockBodyBase, #[superstruct(only(Altair), partial_getter(rename = "body_altair"))] - pub body: BeaconBlockBodyAltair, + pub body: BeaconBlockBodyAltair, #[superstruct(only(Merge), partial_getter(rename = "body_merge"))] - pub body: BeaconBlockBodyMerge, + pub body: BeaconBlockBodyMerge, } -impl SignedRoot for BeaconBlock {} -impl<'a, T: EthSpec> SignedRoot for BeaconBlockRef<'a, T> {} +impl> SignedRoot for BeaconBlock {} +impl<'a, T: EthSpec, Payload: ExecPayload> SignedRoot for BeaconBlockRef<'a, T, Payload> {} -impl BeaconBlock { +impl> BeaconBlock { /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { if spec.bellatrix_fork_epoch == Some(T::genesis_epoch()) { @@ -114,12 +117,12 @@ impl BeaconBlock { } /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. - pub fn body(&self) -> BeaconBlockBodyRef<'_, T> { + pub fn body(&self) -> BeaconBlockBodyRef<'_, T, Payload> { self.to_ref().body() } /// Convenience accessor for the `body` as a `BeaconBlockBodyRefMut`. - pub fn body_mut(&mut self) -> BeaconBlockBodyRefMut<'_, T> { + pub fn body_mut(&mut self) -> BeaconBlockBodyRefMut<'_, T, Payload> { self.to_mut().body_mut() } @@ -160,7 +163,7 @@ impl BeaconBlock { fork: &Fork, genesis_validators_root: Hash256, spec: &ChainSpec, - ) -> SignedBeaconBlock { + ) -> SignedBeaconBlock { let domain = spec.get_domain( self.epoch(), Domain::BeaconProposer, @@ -173,7 +176,7 @@ impl BeaconBlock { } } -impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { +impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRef<'a, T, Payload> { /// Returns the name of the fork pertaining to `self`. 
/// /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork @@ -197,21 +200,18 @@ impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { } /// Convenience accessor for the `body` as a `BeaconBlockBodyRef`. - pub fn body(&self) -> BeaconBlockBodyRef<'a, T> { - match self { - BeaconBlockRef::Base(block) => BeaconBlockBodyRef::Base(&block.body), - BeaconBlockRef::Altair(block) => BeaconBlockBodyRef::Altair(&block.body), - BeaconBlockRef::Merge(block) => BeaconBlockBodyRef::Merge(&block.body), - } + pub fn body(&self) -> BeaconBlockBodyRef<'a, T, Payload> { + map_beacon_block_ref_into_beacon_block_body_ref!(&'a _, *self, |block, cons| cons( + &block.body + )) } /// Return the tree hash root of the block's body. pub fn body_root(&self) -> Hash256 { - match self { - BeaconBlockRef::Base(block) => block.body.tree_hash_root(), - BeaconBlockRef::Altair(block) => block.body.tree_hash_root(), - BeaconBlockRef::Merge(block) => block.body.tree_hash_root(), - } + map_beacon_block_ref!(&'a _, *self, |block, cons| { + let _: Self = cons(block); + block.body.tree_hash_root() + }) } /// Returns the epoch corresponding to `self.slot()`. @@ -240,23 +240,21 @@ impl<'a, T: EthSpec> BeaconBlockRef<'a, T> { /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. - pub fn execution_payload(&self) -> Result<&ExecutionPayload, Error> { + pub fn execution_payload(&self) -> Result<&Payload, Error> { self.body().execution_payload() } } -impl<'a, T: EthSpec> BeaconBlockRefMut<'a, T> { +impl<'a, T: EthSpec, Payload: ExecPayload> BeaconBlockRefMut<'a, T, Payload> { /// Convert a mutable reference to a beacon block to a mutable ref to its body. 
- pub fn body_mut(self) -> BeaconBlockBodyRefMut<'a, T> { - match self { - BeaconBlockRefMut::Base(block) => BeaconBlockBodyRefMut::Base(&mut block.body), - BeaconBlockRefMut::Altair(block) => BeaconBlockBodyRefMut::Altair(&mut block.body), - BeaconBlockRefMut::Merge(block) => BeaconBlockBodyRefMut::Merge(&mut block.body), - } + pub fn body_mut(self) -> BeaconBlockBodyRefMut<'a, T, Payload> { + map_beacon_block_ref_mut_into_beacon_block_body_ref_mut!(&'a _, self, |block, cons| cons( + &mut block.body + )) } } -impl BeaconBlockBase { +impl> BeaconBlockBase { /// Returns an empty block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { BeaconBlockBase { @@ -277,6 +275,7 @@ impl BeaconBlockBase { attestations: VariableList::empty(), deposits: VariableList::empty(), voluntary_exits: VariableList::empty(), + _phantom: PhantomData, }, } } @@ -343,7 +342,7 @@ impl BeaconBlockBase { signature: Signature::empty(), }; - let mut block = BeaconBlockBase::::empty(spec); + let mut block = BeaconBlockBase::::empty(spec); for _ in 0..T::MaxProposerSlashings::to_usize() { block .body @@ -376,7 +375,7 @@ impl BeaconBlockBase { } } -impl BeaconBlockAltair { +impl> BeaconBlockAltair { /// Returns an empty Altair block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { BeaconBlockAltair { @@ -398,13 +397,14 @@ impl BeaconBlockAltair { deposits: VariableList::empty(), voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), + _phantom: PhantomData, }, } } /// Return an Altair block where the block has maximum size. 
pub fn full(spec: &ChainSpec) -> Self { - let base_block = BeaconBlockBase::full(spec); + let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); let sync_aggregate = SyncAggregate { sync_committee_signature: AggregateSignature::empty(), sync_committee_bits: BitVector::default(), @@ -428,12 +428,13 @@ impl BeaconBlockAltair { deposit_count: 0, }, graffiti: Graffiti::default(), + _phantom: PhantomData, }, } } } -impl BeaconBlockMerge { +impl> BeaconBlockMerge { /// Returns an empty Merge block to be used during genesis. pub fn empty(spec: &ChainSpec) -> Self { BeaconBlockMerge { @@ -455,39 +456,105 @@ impl BeaconBlockMerge { deposits: VariableList::empty(), voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), - execution_payload: ExecutionPayload::empty(), + execution_payload: Payload::default(), }, } } +} - /// Return an Merge block where the block has maximum size. - pub fn full(spec: &ChainSpec) -> Self { - let altair_block = BeaconBlockAltair::full(spec); - BeaconBlockMerge { - slot: spec.genesis_slot, - proposer_index: 0, - parent_root: Hash256::zero(), - state_root: Hash256::zero(), - body: BeaconBlockBodyMerge { - proposer_slashings: altair_block.body.proposer_slashings, - attester_slashings: altair_block.body.attester_slashings, - attestations: altair_block.body.attestations, - deposits: altair_block.body.deposits, - voluntary_exits: altair_block.body.voluntary_exits, - sync_aggregate: altair_block.body.sync_aggregate, - randao_reveal: Signature::empty(), - eth1_data: Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - deposit_count: 0, - }, - graffiti: Graffiti::default(), - execution_payload: ExecutionPayload::default(), - }, +// We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads. 
+impl From>> + for BeaconBlockBase> +{ + fn from(block: BeaconBlockBase>) -> Self { + let BeaconBlockBase { + slot, + proposer_index, + parent_root, + state_root, + body, + } = block; + + BeaconBlockBase { + slot, + proposer_index, + parent_root, + state_root, + body: body.into(), } } } +impl From>> + for BeaconBlockAltair> +{ + fn from(block: BeaconBlockAltair>) -> Self { + let BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root, + body, + } = block; + + BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root, + body: body.into(), + } + } +} + +// We can convert blocks with payloads to blocks without payloads, and an optional payload. +macro_rules! impl_from { + ($ty_name:ident, <$($from_params:ty),*>, <$($to_params:ty),*>, $body_expr:expr) => { + impl From<$ty_name<$($from_params),*>> + for ($ty_name<$($to_params),*>, Option>) + { + #[allow(clippy::redundant_closure_call)] + fn from(block: $ty_name<$($from_params),*>) -> Self { + let $ty_name { + slot, + proposer_index, + parent_root, + state_root, + body, + } = block; + + let (body, payload) = ($body_expr)(body); + + ($ty_name { + slot, + proposer_index, + parent_root, + state_root, + body, + }, payload) + } + } + } +} + +impl_from!(BeaconBlockBase, >, >, |body: BeaconBlockBodyBase<_, _>| body.into()); +impl_from!(BeaconBlockAltair, >, >, |body: BeaconBlockBodyAltair<_, _>| body.into()); +impl_from!(BeaconBlockMerge, >, >, |body: BeaconBlockBodyMerge<_, _>| body.into()); + +impl From>> + for ( + BeaconBlock>, + Option>, + ) +{ + fn from(block: BeaconBlock>) -> Self { + map_beacon_block!(block, |inner, cons| { + let (block, payload) = inner.into(); + (cons(block), payload) + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index c4df4f2771..34761ea9a7 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -4,6 +4,7 @@ use 
derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; +use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -25,8 +26,8 @@ use tree_hash_derive::TreeHash; TestRandom, Derivative, ), - derivative(PartialEq, Hash(bound = "T: EthSpec")), - serde(bound = "T: EthSpec", deny_unknown_fields), + derivative(PartialEq, Hash(bound = "T: EthSpec, Payload: ExecPayload")), + serde(bound = "T: EthSpec, Payload: ExecPayload", deny_unknown_fields), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), @@ -35,9 +36,9 @@ use tree_hash_derive::TreeHash; #[derive(Debug, Clone, Serialize, Deserialize, Derivative)] #[derivative(PartialEq, Hash(bound = "T: EthSpec"))] #[serde(untagged)] -#[serde(bound = "T: EthSpec")] +#[serde(bound = "T: EthSpec, Payload: ExecPayload")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -pub struct BeaconBlockBody { +pub struct BeaconBlockBody = FullPayload> { pub randao_reveal: Signature, pub eth1_data: Eth1Data, pub graffiti: Graffiti, @@ -48,8 +49,17 @@ pub struct BeaconBlockBody { pub voluntary_exits: VariableList, #[superstruct(only(Altair, Merge))] pub sync_aggregate: SyncAggregate, + // We flatten the execution payload so that serde can use the name of the inner type, + // either `execution_payload` for full payloads, or `execution_payload_header` for blinded + // payloads. 
#[superstruct(only(Merge))] - pub execution_payload: ExecutionPayload, + #[serde(flatten)] + pub execution_payload: Payload, + #[superstruct(only(Base, Altair))] + #[ssz(skip_serializing, skip_deserializing)] + #[tree_hash(skip_hashing)] + #[serde(skip)] + pub _phantom: PhantomData, } impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { @@ -63,6 +73,198 @@ impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { } } +// We can convert pre-Bellatrix block bodies without payloads into block bodies "with" payloads. +impl From>> + for BeaconBlockBodyBase> +{ + fn from(body: BeaconBlockBodyBase>) -> Self { + let BeaconBlockBodyBase { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + _phantom, + } = body; + + BeaconBlockBodyBase { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + _phantom: PhantomData, + } + } +} + +impl From>> + for BeaconBlockBodyAltair> +{ + fn from(body: BeaconBlockBodyAltair>) -> Self { + let BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + _phantom, + } = body; + + BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + _phantom: PhantomData, + } + } +} + +// Likewise bodies with payloads can be transformed into bodies without. 
+impl From>> + for ( + BeaconBlockBodyBase>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyBase>) -> Self { + let BeaconBlockBodyBase { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + _phantom, + } = body; + + ( + BeaconBlockBodyBase { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + _phantom: PhantomData, + }, + None, + ) + } +} + +impl From>> + for ( + BeaconBlockBodyAltair>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyAltair>) -> Self { + let BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + _phantom, + } = body; + + ( + BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + _phantom: PhantomData, + }, + None, + ) + } +} + +impl From>> + for ( + BeaconBlockBodyMerge>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyMerge>) -> Self { + let BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayload { execution_payload }, + } = body; + + ( + BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayload { + execution_payload_header: From::from(&execution_payload), + }, + }, + Some(execution_payload), + ) + } +} + +impl From>> + for ( + BeaconBlockBody>, + Option>, + ) +{ + fn from(body: BeaconBlockBody>) -> Self { + map_beacon_block_body!(body, |inner, cons| { + let (block, payload) = inner.into(); + (cons(block), payload) + }) + } +} + #[cfg(test)] mod 
tests { mod base { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 1472cabbad..13f431abcd 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -507,7 +507,7 @@ impl ChainSpec { * Fork choice */ safe_slots_to_update_justified: 8, - proposer_score_boost: None, + proposer_score_boost: Some(40), /* * Eth1 @@ -705,7 +705,7 @@ impl ChainSpec { * Fork choice */ safe_slots_to_update_justified: 8, - proposer_score_boost: None, + proposer_score_boost: Some(40), /* * Eth1 @@ -1268,7 +1268,7 @@ mod yaml_tests { EJECTION_BALANCE: 16000000000 MIN_PER_EPOCH_CHURN_LIMIT: 4 CHURN_LIMIT_QUOTIENT: 65536 - PROPOSER_SCORE_BOOST: 70 + PROPOSER_SCORE_BOOST: 40 DEPOSIT_CHAIN_ID: 1 DEPOSIT_NETWORK_ID: 1 DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index d934a6ca00..315fe071eb 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -8,7 +8,11 @@ use tree_hash_derive::TreeHash; use ssz_types::{FixedVector, VariableList}; -pub type Transaction = VariableList; +pub type Transaction = VariableList; +pub type Transactions = VariableList< + Transaction<::MaxBytesPerTransaction>, + ::MaxTransactionsPerPayload, +>; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( @@ -38,8 +42,7 @@ pub struct ExecutionPayload { pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] - pub transactions: - VariableList, T::MaxTransactionsPerPayload>, + pub transactions: Transactions, } impl ExecutionPayload { @@ -52,9 +55,9 @@ impl ExecutionPayload { pub fn max_execution_payload_size() -> usize { // Fixed part Self::empty().as_ssz_bytes().len() - // Max size of variable length `extra_data` field - + (T::max_extra_data_bytes() * ::ssz_fixed_len()) - // Max size of variable 
length `transactions` field - + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) + // Max size of variable length `extra_data` field + + (T::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (T::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + T::max_bytes_per_transaction())) } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 53f2caaa22..728ffd1ee0 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -1,15 +1,18 @@ use crate::{test_utils::TestRandom, *}; +use derivative::Derivative; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use ssz_types::{FixedVector, VariableList}; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive( - Default, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Default, Debug, Clone, Serialize, Deserialize, Derivative, Encode, Decode, TreeHash, TestRandom, )] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] pub struct ExecutionPayloadHeader { pub parent_hash: ExecutionBlockHash, pub fee_recipient: Address, @@ -39,3 +42,24 @@ impl ExecutionPayloadHeader { Self::default() } } + +impl<'a, T: EthSpec> From<&'a ExecutionPayload> for ExecutionPayloadHeader { + fn from(payload: &'a ExecutionPayload) -> Self { + ExecutionPayloadHeader { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: 
payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + } + } +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index eaa59c9633..1cdee71341 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -70,6 +70,7 @@ pub mod config_and_preset; pub mod fork_context; pub mod participation_flags; pub mod participation_list; +pub mod payload; pub mod preset; pub mod slot_epoch; pub mod subnet_id; @@ -114,7 +115,7 @@ pub use crate::enr_fork_id::EnrForkId; pub use crate::eth1_data::Eth1Data; pub use crate::eth_spec::EthSpecId; pub use crate::execution_block_hash::ExecutionBlockHash; -pub use crate::execution_payload::{ExecutionPayload, Transaction}; +pub use crate::execution_payload::{ExecutionPayload, Transaction, Transactions}; pub use crate::execution_payload_header::ExecutionPayloadHeader; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; @@ -126,6 +127,7 @@ pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; pub use crate::participation_flags::ParticipationFlags; pub use crate::participation_list::ParticipationList; +pub use crate::payload::{BlindedPayload, BlockType, ExecPayload, FullPayload}; pub use crate::pending_attestation::PendingAttestation; pub use crate::preset::{AltairPreset, BasePreset, BellatrixPreset}; pub use crate::proposer_preparation_data::ProposerPreparationData; @@ -136,7 +138,7 @@ pub use crate::shuffling_id::AttestationShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{ SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockHash, - SignedBeaconBlockMerge, + SignedBeaconBlockMerge, SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use 
crate::signed_contribution_and_proof::SignedContributionAndProof; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs new file mode 100644 index 0000000000..cb4678e8d9 --- /dev/null +++ b/consensus/types/src/payload.rs @@ -0,0 +1,262 @@ +use crate::{test_utils::TestRandom, *}; +use derivative::Derivative; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use ssz::{Decode, DecodeError, Encode}; +use std::convert::TryFrom; +use std::fmt::Debug; +use std::hash::Hash; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; + +pub enum BlockType { + Blinded, + Full, +} + +pub trait ExecPayload: + Debug + + Clone + + Encode + + Decode + + TestRandom + + TreeHash + + Default + + PartialEq + + Serialize + + DeserializeOwned + + Hash + + TryFrom> + + From> +{ + fn block_type() -> BlockType; + + /// Convert the payload into a payload header. + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader; + + // We provide a subset of field accessors, for the fields used in `consensus`. + // + // More fields can be added here if you wish. 
+ fn parent_hash(&self) -> ExecutionBlockHash; + fn prev_randao(&self) -> Hash256; + fn block_number(&self) -> u64; + fn timestamp(&self) -> u64; + fn block_hash(&self) -> ExecutionBlockHash; +} + +impl ExecPayload for FullPayload { + fn block_type() -> BlockType { + BlockType::Full + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + ExecutionPayloadHeader::from(&self.execution_payload) + } + + fn parent_hash(&self) -> ExecutionBlockHash { + self.execution_payload.parent_hash + } + + fn prev_randao(&self) -> Hash256 { + self.execution_payload.prev_randao + } + + fn block_number(&self) -> u64 { + self.execution_payload.block_number + } + + fn timestamp(&self) -> u64 { + self.execution_payload.timestamp + } + + fn block_hash(&self) -> ExecutionBlockHash { + self.execution_payload.block_hash + } +} + +impl ExecPayload for BlindedPayload { + fn block_type() -> BlockType { + BlockType::Blinded + } + + fn to_execution_payload_header(&self) -> ExecutionPayloadHeader { + self.execution_payload_header.clone() + } + + fn parent_hash(&self) -> ExecutionBlockHash { + self.execution_payload_header.parent_hash + } + + fn prev_randao(&self) -> Hash256 { + self.execution_payload_header.prev_randao + } + + fn block_number(&self) -> u64 { + self.execution_payload_header.block_number + } + + fn timestamp(&self) -> u64 { + self.execution_payload_header.timestamp + } + + fn block_hash(&self) -> ExecutionBlockHash { + self.execution_payload_header.block_hash + } +} + +#[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +pub struct BlindedPayload { + pub execution_payload_header: ExecutionPayloadHeader, +} + +// NOTE: the `Default` implementation for `BlindedPayload` needs to be different from the `Default` +// implementation for `ExecutionPayloadHeader` because payloads are checked for equality against the +// default payload in `is_merge_transition_block` to 
determine whether the merge has occurred. +// +// The default `BlindedPayload` is therefore the payload header that results from blinding the +// default `ExecutionPayload`, which differs from the default `ExecutionPayloadHeader` in that +// its `transactions_root` is the hash of the empty list rather than 0x0. +impl Default for BlindedPayload { + fn default() -> Self { + Self { + execution_payload_header: ExecutionPayloadHeader::from(&ExecutionPayload::default()), + } + } +} + +impl From> for BlindedPayload { + fn from(execution_payload_header: ExecutionPayloadHeader) -> Self { + Self { + execution_payload_header, + } + } +} + +impl From> for ExecutionPayloadHeader { + fn from(blinded: BlindedPayload) -> Self { + blinded.execution_payload_header + } +} + +impl From> for BlindedPayload { + fn from(execution_payload: ExecutionPayload) -> Self { + Self { + execution_payload_header: ExecutionPayloadHeader::from(&execution_payload), + } + } +} + +impl TreeHash for BlindedPayload { + fn tree_hash_type() -> tree_hash::TreeHashType { + >::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { + self.execution_payload_header.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + >::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.execution_payload_header.tree_hash_root() + } +} + +impl Decode for BlindedPayload { + fn is_ssz_fixed_len() -> bool { + as Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + as Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + Ok(Self { + execution_payload_header: ExecutionPayloadHeader::from_ssz_bytes(bytes)?, + }) + } +} + +impl Encode for BlindedPayload { + fn is_ssz_fixed_len() -> bool { + as Encode>::is_ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.execution_payload_header.ssz_append(buf) + } + + fn ssz_bytes_len(&self) -> usize { + 
self.execution_payload_header.ssz_bytes_len() + } +} + +#[derive(Default, Debug, Clone, Serialize, Deserialize, TestRandom, Derivative)] +#[derivative(PartialEq, Hash(bound = "T: EthSpec"))] +#[serde(bound = "T: EthSpec")] +pub struct FullPayload { + pub execution_payload: ExecutionPayload, +} + +impl From> for FullPayload { + fn from(execution_payload: ExecutionPayload) -> Self { + Self { execution_payload } + } +} + +impl TryFrom> for FullPayload { + type Error = (); + + fn try_from(_: ExecutionPayloadHeader) -> Result { + Err(()) + } +} + +impl TreeHash for FullPayload { + fn tree_hash_type() -> tree_hash::TreeHashType { + >::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { + self.execution_payload.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + >::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.execution_payload.tree_hash_root() + } +} + +impl Decode for FullPayload { + fn is_ssz_fixed_len() -> bool { + as Decode>::is_ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + Ok(FullPayload { + execution_payload: Decode::from_ssz_bytes(bytes)?, + }) + } +} + +impl Encode for FullPayload { + fn is_ssz_fixed_len() -> bool { + as Encode>::is_ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.execution_payload.ssz_append(buf) + } + + fn ssz_bytes_len(&self) -> usize { + self.execution_payload.ssz_bytes_len() + } +} diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 8d7df0cb02..5488070688 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -52,27 +52,32 @@ impl From for Hash256 { ), derivative(PartialEq, Hash(bound = "E: EthSpec")), cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary)), - serde(bound = "E: EthSpec") - ) + serde(bound = "E: EthSpec, Payload: ExecPayload"), + ), + 
map_into(BeaconBlock), + map_ref_into(BeaconBlockRef), + map_ref_mut_into(BeaconBlockRefMut) )] #[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(untagged)] -#[serde(bound = "E: EthSpec")] +#[serde(bound = "E: EthSpec, Payload: ExecPayload")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct SignedBeaconBlock { +pub struct SignedBeaconBlock = FullPayload> { #[superstruct(only(Base), partial_getter(rename = "message_base"))] - pub message: BeaconBlockBase, + pub message: BeaconBlockBase, #[superstruct(only(Altair), partial_getter(rename = "message_altair"))] - pub message: BeaconBlockAltair, + pub message: BeaconBlockAltair, #[superstruct(only(Merge), partial_getter(rename = "message_merge"))] - pub message: BeaconBlockMerge, + pub message: BeaconBlockMerge, pub signature: Signature, } -impl SignedBeaconBlock { +pub type SignedBlindedBeaconBlock = SignedBeaconBlock>; + +impl> SignedBeaconBlock { /// Returns the name of the fork pertaining to `self`. /// /// Will return an `Err` if `self` has been instantiated to a variant conflicting with the fork @@ -94,7 +99,7 @@ impl SignedBeaconBlock { /// SSZ decode with custom decode function. pub fn from_ssz_bytes_with( bytes: &[u8], - block_decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, + block_decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, ) -> Result { // We need the customer decoder for `BeaconBlock`, which doesn't compose with the other // SSZ utils, so we duplicate some parts of `ssz_derive` here. @@ -113,7 +118,7 @@ impl SignedBeaconBlock { } /// Create a new `SignedBeaconBlock` from a `BeaconBlock` and `Signature`. 
- pub fn from_block(block: BeaconBlock, signature: Signature) -> Self { + pub fn from_block(block: BeaconBlock, signature: Signature) -> Self { match block { BeaconBlock::Base(message) => { SignedBeaconBlock::Base(SignedBeaconBlockBase { message, signature }) @@ -131,32 +136,28 @@ impl SignedBeaconBlock { /// /// This is necessary to get a `&BeaconBlock` from a `SignedBeaconBlock` because /// `SignedBeaconBlock` only contains a `BeaconBlock` _variant_. - pub fn deconstruct(self) -> (BeaconBlock, Signature) { - match self { - SignedBeaconBlock::Base(block) => (BeaconBlock::Base(block.message), block.signature), - SignedBeaconBlock::Altair(block) => { - (BeaconBlock::Altair(block.message), block.signature) - } - SignedBeaconBlock::Merge(block) => (BeaconBlock::Merge(block.message), block.signature), - } + pub fn deconstruct(self) -> (BeaconBlock, Signature) { + map_signed_beacon_block_into_beacon_block!(self, |block, beacon_block_cons| { + (beacon_block_cons(block.message), block.signature) + }) } /// Accessor for the block's `message` field as a ref. - pub fn message(&self) -> BeaconBlockRef<'_, E> { - match self { - SignedBeaconBlock::Base(inner) => BeaconBlockRef::Base(&inner.message), - SignedBeaconBlock::Altair(inner) => BeaconBlockRef::Altair(&inner.message), - SignedBeaconBlock::Merge(inner) => BeaconBlockRef::Merge(&inner.message), - } + pub fn message<'a>(&'a self) -> BeaconBlockRef<'a, E, Payload> { + map_signed_beacon_block_ref_into_beacon_block_ref!( + &'a _, + self.to_ref(), + |inner, cons| cons(&inner.message) + ) } /// Accessor for the block's `message` as a mutable reference (for testing only). 
- pub fn message_mut(&mut self) -> BeaconBlockRefMut<'_, E> { - match self { - SignedBeaconBlock::Base(inner) => BeaconBlockRefMut::Base(&mut inner.message), - SignedBeaconBlock::Altair(inner) => BeaconBlockRefMut::Altair(&mut inner.message), - SignedBeaconBlock::Merge(inner) => BeaconBlockRefMut::Merge(&mut inner.message), - } + pub fn message_mut<'a>(&'a mut self) -> BeaconBlockRefMut<'a, E, Payload> { + map_signed_beacon_block_ref_mut_into_beacon_block_ref_mut!( + &'a _, + self.to_mut(), + |inner, cons| cons(&mut inner.message) + ) } /// Verify `self.signature`. @@ -225,3 +226,165 @@ impl SignedBeaconBlock { self.message().tree_hash_root() } } + +// We can convert pre-Bellatrix blocks without payloads into blocks with payloads. +impl From>> + for SignedBeaconBlockBase> +{ + fn from(signed_block: SignedBeaconBlockBase>) -> Self { + let SignedBeaconBlockBase { message, signature } = signed_block; + SignedBeaconBlockBase { + message: message.into(), + signature, + } + } +} + +impl From>> + for SignedBeaconBlockAltair> +{ + fn from(signed_block: SignedBeaconBlockAltair>) -> Self { + let SignedBeaconBlockAltair { message, signature } = signed_block; + SignedBeaconBlockAltair { + message: message.into(), + signature, + } + } +} + +// Post-Bellatrix blocks can be "unblinded" by adding the full payload. +// NOTE: It might be nice to come up with a `superstruct` pattern to abstract over this before +// the first fork after Bellatrix. +impl SignedBeaconBlockMerge> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayload, + ) -> SignedBeaconBlockMerge> { + let SignedBeaconBlockMerge { + message: + BeaconBlockMerge { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayload { .. 
}, + }, + }, + signature, + } = self; + SignedBeaconBlockMerge { + message: BeaconBlockMerge { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayload { execution_payload }, + }, + }, + signature, + } + } +} + +impl SignedBeaconBlock> { + pub fn try_into_full_block( + self, + execution_payload: Option>, + ) -> Option>> { + let full_block = match self { + SignedBeaconBlock::Base(block) => SignedBeaconBlock::Base(block.into()), + SignedBeaconBlock::Altair(block) => SignedBeaconBlock::Altair(block.into()), + SignedBeaconBlock::Merge(block) => { + SignedBeaconBlock::Merge(block.into_full_block(execution_payload?)) + } + }; + Some(full_block) + } +} + +// We can blind blocks with payloads by converting the payload into a header. +// +// We can optionally keep the header, or discard it. 
+impl From> + for (SignedBlindedBeaconBlock, Option>) +{ + fn from(signed_block: SignedBeaconBlock) -> Self { + let (block, signature) = signed_block.deconstruct(); + let (blinded_block, payload) = block.into(); + ( + SignedBeaconBlock::from_block(blinded_block, signature), + payload, + ) + } +} + +impl From> for SignedBlindedBeaconBlock { + fn from(signed_block: SignedBeaconBlock) -> Self { + let (blinded_block, _) = signed_block.into(); + blinded_block + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn add_remove_payload_roundtrip() { + type E = MainnetEthSpec; + + let spec = &E::default_spec(); + let sig = Signature::empty(); + let blocks = vec![ + SignedBeaconBlock::::from_block( + BeaconBlock::Base(BeaconBlockBase::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block( + BeaconBlock::Altair(BeaconBlockAltair::empty(spec)), + sig.clone(), + ), + SignedBeaconBlock::from_block(BeaconBlock::Merge(BeaconBlockMerge::empty(spec)), sig), + ]; + + for block in blocks { + let (blinded_block, payload): (SignedBlindedBeaconBlock, _) = block.clone().into(); + assert_eq!(blinded_block.tree_hash_root(), block.tree_hash_root()); + + if let Some(payload) = &payload { + assert_eq!( + payload.tree_hash_root(), + block + .message() + .execution_payload() + .unwrap() + .tree_hash_root() + ); + } + + let reconstructed = blinded_block.try_into_full_block(payload).unwrap(); + assert_eq!(reconstructed, block); + } + } +} diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 5528558d25..5e2a5e07af 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -3,6 +3,7 @@ use rand::RngCore; use rand::SeedableRng; use rand_xorshift::XorShiftRng; use ssz_types::typenum::Unsigned; +use std::marker::PhantomData; use std::sync::Arc; mod address; @@ -25,6 +26,12 @@ pub trait TestRandom { fn random_for_test(rng: &mut impl RngCore) -> Self; } +impl 
TestRandom for PhantomData { + fn random_for_test(_rng: &mut impl RngCore) -> Self { + PhantomData::default() + } +} + impl TestRandom for bool { fn random_for_test(rng: &mut impl RngCore) -> Self { (rng.next_u32() % 2) == 1 diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index d71b46dc55..912f49c6f0 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -13,7 +13,7 @@ serde = "1.0.116" serde_derive = "1.0.116" eth2_serde_utils = "0.1.1" hex = "0.4.2" -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" ethereum-types = "0.12.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } zeroize = { version = "1.4.2", features = ["zeroize_derive"] } diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index 574dbcf2c2..7490ab6093 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_hashing" -version = "0.2.0" +version = "0.3.0" authors = ["Paul Hauner "] edition = "2021" license = "Apache-2.0" @@ -9,8 +9,8 @@ description = "Hashing primitives used in Ethereum 2.0" [dependencies] lazy_static = { version = "1.4.0", optional = true } ring = "0.16.19" -sha2 = "0.9.5" -cpufeatures = "0.1.5" +sha2 = "0.10.2" +cpufeatures = "0.2.2" [dev-dependencies] rustc-hex = "2.1.0" diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index 2bfdde8ac5..98521c8fbb 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = "0.7.3" +rand = "0.8.5" hmac = "0.11.0" pbkdf2 = { version = "0.8.0", default-features = false } scrypt = { version = "0.7.0", default-features = false } diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml index e564209b65..71f66ff933 100644 --- a/crypto/eth2_wallet/Cargo.toml +++ b/crypto/eth2_wallet/Cargo.toml @@ -11,7 +11,7 
@@ serde = "1.0.116" serde_json = "1.0.58" serde_repr = "0.1.6" uuid = { version = "0.8.1", features = ["serde", "v4"] } -rand = "0.7.3" +rand = "0.8.5" eth2_keystore = { path = "../eth2_keystore" } eth2_key_derivation = { path = "../eth2_key_derivation" } tiny-bip39 = "0.8.1" diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index 436f9b1e19..f715528138 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -15,4 +15,4 @@ store = { path = "../beacon_node/store" } tempfile = "3.1.0" types = { path = "../consensus/types" } slog = "2.5.2" -strum = { version = "0.24", features = ["derive"] } +strum = { version = "0.24.0", features = ["derive"] } diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 98b728c892..6717bb0f46 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -63,7 +63,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) .visible_aliases(&["db"]) .setting(clap::AppSettings::ColoredHelp) - .about("") + .about("Manage a beacon node database") .arg( Arg::with_name("slots-per-restore-point") .long("slots-per-restore-point") @@ -100,7 +100,9 @@ fn parse_client_config( client_config.freezer_db_path = Some(freezer_dir); } - client_config.store.slots_per_restore_point = get_slots_per_restore_point::(cli_args)?; + let (sprp, sprp_explicit) = get_slots_per_restore_point::(cli_args)?; + client_config.store.slots_per_restore_point = sprp; + client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; Ok(client_config) } @@ -124,10 +126,18 @@ pub fn display_db_version( }, client_config.store, spec, - log, + log.clone(), )?; - println!("Database version: {}", version.as_u64()); + info!(log, "Database version: {}", version.as_u64()); + + if version != CURRENT_SCHEMA_VERSION { + info!( + log, + "Latest schema version: {}", + CURRENT_SCHEMA_VERSION.as_u64(), + ); + } Ok(()) } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 77aa6769fe..d5b0dc3e96 100644 
--- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.1.5" +version = "2.2.1" authors = ["Paul Hauner "] edition = "2021" @@ -33,7 +33,7 @@ lighthouse_version = { path = "../common/lighthouse_version" } directory = { path = "../common/directory" } account_utils = { path = "../common/account_utils" } eth2_wallet = { path = "../crypto/eth2_wallet" } -web3 = { version = "0.17.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } +web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } eth1_test_rig = { path = "../testing/eth1_test_rig" } sensitive_url = { path = "../common/sensitive_url" } eth2 = { path = "../common/eth2" } diff --git a/lcli/src/etl/block_efficiency.rs b/lcli/src/etl/block_efficiency.rs deleted file mode 100644 index 1c7ba1fe61..0000000000 --- a/lcli/src/etl/block_efficiency.rs +++ /dev/null @@ -1,379 +0,0 @@ -use clap::ArgMatches; -use eth2::types::*; -use eth2::{BeaconNodeHttpClient, Timeouts}; -use log::{error, info}; -use sensitive_url::SensitiveUrl; -use std::collections::{HashMap, HashSet}; -use std::fs::File; -use std::io::Write; -use std::path::PathBuf; -use std::time::Duration; - -type CommitteePosition = usize; -type Committee = u64; -type InclusionDistance = u64; -type ValidatorIndex = u64; - -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -struct UniqueAttestation { - slot: Slot, - committee_index: Committee, - committee_position: CommitteePosition, -} - -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -struct ProposerInfo { - proposer_index: ValidatorIndex, - graffiti: String, -} - -#[derive(Debug)] -struct CommitteeInfo { - number_of_committees: usize, - validators_per_committee: usize, -} - -async fn get_validator_set_len( - node: &BeaconNodeHttpClient, - slot: Slot, -) -> Result { - let active_validator_set = node - .get_beacon_states_validators(StateId::Slot(slot), 
None, None) - .await - .map_err(|e| format!("{:?}", e))? - .ok_or_else(|| "No validators found".to_string())? - .data; - Ok(active_validator_set - .iter() - .filter(|x| x.status.superstatus() == ValidatorStatus::Active) - .count()) -} - -async fn get_block_attestations_set<'a, T: EthSpec>( - node: &BeaconNodeHttpClient, - slot: Slot, -) -> Result, ProposerInfo)>, String> { - let mut unique_attestations_set: HashMap = HashMap::new(); - - let option_block: Option>> = node - .get_beacon_blocks(BlockId::Slot(slot)) - .await - .map_err(|e| format!("{:?}", e))?; - - let block = match option_block { - Some(block) => block.data, - // No block was proposed for this slot. - None => return Ok(None), - }; - - let proposer = ProposerInfo { - proposer_index: block.message().proposer_index(), - graffiti: block - .message() - .body() - .graffiti() - .as_utf8_lossy() - // Remove commas and apostropes from graffiti to ensure correct CSV format. - .replace(',', "") - .replace('"', "") - .replace('\'', ""), - }; - - let attestations = block.message().body().attestations(); - - for attestation in attestations.iter() { - for (position, voted) in attestation.aggregation_bits.iter().enumerate() { - if voted { - let unique_attestation = UniqueAttestation { - slot: attestation.data.slot, - committee_index: attestation.data.index, - committee_position: position, - }; - let inclusion_distance: u64 = slot - .as_u64() - .checked_sub(attestation.data.slot.as_u64()) - .ok_or_else(|| "Attestation slot is larger than the block slot".to_string())?; - unique_attestations_set.insert(unique_attestation, inclusion_distance); - } - } - } - - Ok(Some((unique_attestations_set, proposer))) -} - -async fn get_epoch_committee_data( - node: &BeaconNodeHttpClient, - epoch: Epoch, -) -> Result<(Vec, CommitteeInfo), String> { - let committee_data = node - .get_beacon_states_committees( - StateId::Slot(Epoch::start_slot(epoch, T::slots_per_epoch())), - None, - None, - Some(epoch), - ) - .await - .map_err(|e| 
format!("{:?}", e))? - .ok_or_else(|| "No committees found".to_string())? - .data; - - let committee_info = CommitteeInfo { - number_of_committees: committee_data.len(), - // FIXME: validators.len() isn't consistent between different committees in the - // same epoch. - validators_per_committee: committee_data[0].validators.len(), - }; - - Ok((committee_data, committee_info)) -} - -pub async fn run(matches: &ArgMatches<'_>) -> Result<(), String> { - const SECONDS_PER_SLOT: Duration = Duration::from_secs(12); - let output_path: PathBuf = clap_utils::parse_required(matches, "output")?; - let start_epoch: Epoch = clap_utils::parse_required(matches, "start-epoch")?; - let offline_window: u64 = matches - .value_of("offline-window") - .unwrap_or("3") - .parse() - .map_err(|e| format!("{:?}", e))?; - let calculate_offline_vals = offline_window != 0; - - if start_epoch == 0 { - return Err("start_epoch cannot be 0.".to_string()); - } - let initialization_epoch: Epoch = start_epoch - 1; - let end_epoch: Epoch = clap_utils::parse_required(matches, "end-epoch")?; - - if end_epoch < start_epoch { - return Err("start_epoch must be smaller than end_epoch".to_string()); - } - - let mut available_attestations_set: HashSet = HashSet::new(); - let mut included_attestations_set: HashMap = - HashMap::new(); - - // Build validator set HashMap - // This allows a 'best effort' attempt to normalize block efficiencies. - let mut online_validator_set: HashMap = HashMap::new(); - - let mut proposer_map: HashMap = HashMap::new(); - - let mut file = File::options() - .read(true) - .write(true) - .create(true) - .open(output_path) - .map_err(|e| format!("Unable to open file: {}", e))?; - - write!(file, "slot,proposer,available,included,offline,graffiti").unwrap(); - - // Initialize API. 
- let endpoint = matches - .value_of("endpoint") - .unwrap_or("http://localhost:5052/"); - let node = BeaconNodeHttpClient::new( - SensitiveUrl::parse(endpoint).map_err(|_| "Unable to parse endpoint.".to_string())?, - Timeouts::set_all(SECONDS_PER_SLOT), - ); - - // Check we can connect to the API. - let version = - match node.get_node_version().await { - Ok(version_response) => version_response.data.version, - Err(_) => return Err( - "Error: A working HTTP API server is required. Ensure one is synced and available." - .to_string(), - ), - }; - - // Check we are synced past the required epoch range. - let head_slot_synced = - match node.get_node_syncing().await { - Ok(synced_response) => synced_response.data.head_slot, - Err(_) => return Err( - "Error: A working HTTP API server is required. Ensure one is synced and available." - .to_string(), - ), - }; - - if head_slot_synced < end_epoch.end_slot(T::slots_per_epoch()) { - return Err( - "Error: The beacon node is not sufficiently synced. Make sure your node is synced \ - past the desired `end-epoch` and that you aren't requesting future epochs." - .to_string(), - ); - } - - // Whether the beacon node is responding to requests. This is helpful for logging. - let mut connected: bool = true; - info!("Connected to endpoint at: {:?} - {:?}", endpoint, version); - - // Loop over epochs. 
- for epoch in (initialization_epoch.as_u64()..=end_epoch.as_u64()).map(Epoch::new) { - if epoch != initialization_epoch { - info!("Analysing epoch {}...", epoch); - } else { - info!("Initializing..."); - } - let mut epoch_data: Vec<(Slot, Option, usize, usize)> = Vec::new(); - - // Current epochs available attestations set - let (committee_data, committee_info) = loop { - if let Ok(committee_result) = get_epoch_committee_data::(&node, epoch).await { - if !connected { - info!("Connected to endpoint at: {:?} - {:?}", endpoint, version); - connected = true; - } - break committee_result; - } - - if connected { - connected = false; - error!("A request to the Beacon Node API failed. Check connectivity."); - } - }; - - // Ensure available attestations don't exceed the possible amount of attestations - // as determined by the committee size/number. - // This is unlikely to happen, but not impossible. - let max_possible_attesations = - committee_info.validators_per_committee * committee_info.number_of_committees; - - // Get number of active validators. - let active_validators = - get_validator_set_len::(&node, epoch.start_slot(T::slots_per_epoch())).await?; - - for slot in epoch.slot_iter(T::slots_per_epoch()) { - // Get all included attestations. - let block_result = loop { - if let Ok(block_result) = get_block_attestations_set::(&node, slot).await { - if !connected { - info!("Connected to endpoint at: {:?} - {:?}", endpoint, version); - connected = true; - } - break block_result; - }; - if connected { - connected = false; - error!("A request to the Beacon Node API failed. Check connectivity."); - } - }; - let (mut attestations_in_block, proposer) = match block_result { - Some(output) => (output.0, Some(output.1)), - None => (HashMap::new(), None), - }; - - // Insert block proposer into proposer_map. - if let Some(proposer_info) = proposer { - proposer_map.insert(slot, proposer_info.clone()); - } - - // Remove duplicate attestations. 
- attestations_in_block.retain(|x, _| included_attestations_set.get(x).is_none()); - - // Add them to the set. - included_attestations_set.extend(attestations_in_block.clone()); - - // Remove expired available attestations. - available_attestations_set.retain(|x| x.slot >= (slot.as_u64().saturating_sub(32))); - - // Don't write data from the initialization epoch. - if epoch != initialization_epoch { - let included = attestations_in_block.len(); - - let available = if max_possible_attesations < available_attestations_set.len() { - max_possible_attesations - } else { - available_attestations_set.len() - }; - - // Get proposer information. - let proposer = proposer_map.get(&slot).cloned(); - - // Store slot data. - epoch_data.push((slot, proposer, available, included)); - } - - // Included attestations are no longer available. - for new_attestation in &attestations_in_block { - available_attestations_set.remove(new_attestation.0); - } - - // Get all available attestations. - for committee in &committee_data { - if committee.slot == slot { - for position in 0..committee.validators.len() { - let unique_attestation = UniqueAttestation { - slot: committee.slot, - committee_index: committee.index, - committee_position: position, - }; - available_attestations_set.insert(unique_attestation.clone()); - } - } - } - } - - let mut offline = "None".to_string(); - if calculate_offline_vals { - // Get all online validators for the epoch. - for committee in &committee_data { - for position in 0..committee.validators.len() { - let unique_attestation = UniqueAttestation { - slot: committee.slot, - committee_index: committee.index, - committee_position: position, - }; - let index = committee.validators.get(position).ok_or_else(|| { - "Error parsing validator indices from committee data".to_string() - })?; - - if included_attestations_set.get(&unique_attestation).is_some() { - online_validator_set.insert(*index, epoch); - } - } - } - - // Calculate offline validators. 
- offline = if epoch >= start_epoch + offline_window { - active_validators - .checked_sub(online_validator_set.len()) - .ok_or_else(|| "Online set is greater than active set".to_string())? - .to_string() - } else { - "None".to_string() - }; - } - - // Write epoch data. - for (slot, proposer, available, included) in epoch_data { - let proposer_index = proposer - .clone() - .map_or("None".to_string(), |x| x.proposer_index.to_string()); - let graffiti = proposer.map_or("None".to_string(), |x| x.graffiti); - write!( - file, - "\n{},{},{},{},{},{}", - slot, proposer_index, available, included, offline, graffiti - ) - .unwrap(); - } - - // Free some memory by removing included attestations older than 1 epoch. - included_attestations_set.retain(|x, _| { - x.slot >= Epoch::new(epoch.as_u64().saturating_sub(1)).start_slot(T::slots_per_epoch()) - }); - - if calculate_offline_vals { - // Remove old validators from the validator set which are outside the offline window. - online_validator_set.retain(|_, x| { - *x >= Epoch::new( - epoch - .as_u64() - .saturating_sub(offline_window.saturating_sub(1)), - ) - }); - } - } - Ok(()) -} diff --git a/lcli/src/etl/mod.rs b/lcli/src/etl/mod.rs deleted file mode 100644 index 1137fbb2ef..0000000000 --- a/lcli/src/etl/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod block_efficiency; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 9af4b25548..996bfc0ac7 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -5,7 +5,6 @@ mod check_deposit_data; mod create_payload_header; mod deploy_deposit_contract; mod eth1_genesis; -mod etl; mod generate_bootnode_enr; mod insecure_validators; mod interop_genesis; @@ -599,63 +598,6 @@ fn main() { .help("The number of nodes to divide the validator keys to"), ) ) - .subcommand( - SubCommand::with_name("etl-block-efficiency") - .about( - "Performs ETL analysis of block efficiency. 
Requires a Beacon Node API to \ - extract data from.", - ) - .arg( - Arg::with_name("endpoint") - .long("endpoint") - .short("e") - .takes_value(true) - .default_value("http://localhost:5052") - .help( - "The endpoint of the Beacon Node API." - ), - ) - .arg( - Arg::with_name("output") - .long("output") - .short("o") - .takes_value(true) - .help("The path of the output data in CSV file.") - .required(true), - ) - .arg( - Arg::with_name("start-epoch") - .long("start-epoch") - .takes_value(true) - .help( - "The first epoch in the range of epochs to be evaluated. Use with \ - --end-epoch.", - ) - .required(true), - ) - .arg( - Arg::with_name("end-epoch") - .long("end-epoch") - .takes_value(true) - .help( - "The last epoch in the range of epochs to be evaluated. Use with \ - --start-epoch.", - ) - .required(true), - ) - .arg( - Arg::with_name("offline-window") - .long("offline-window") - .takes_value(true) - .default_value("3") - .help( - "If a validator does not submit an attestion within this many epochs, \ - they are deemed offline. For example, for a offline window of 3, if a \ - validator does not attest in epochs 4, 5 or 6, it is deemed offline \ - during epoch 6. A value of 0 will skip these checks." - ) - ) - ) .get_matches(); let result = matches @@ -737,10 +679,6 @@ fn run( .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), ("insecure-validators", Some(matches)) => insecure_validators::run(matches) .map_err(|e| format!("Failed to run insecure-validators command: {}", e)), - ("etl-block-efficiency", Some(matches)) => env - .runtime() - .block_on(etl::block_efficiency::run::(matches)) - .map_err(|e| format!("Failed to run etl-block_efficiency: {}", e)), (other, _) => Err(format!("Unknown subcommand {}. 
See --help.", other)), } } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 1565ec3241..28d338c829 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "2.1.5" +version = "2.2.1" authors = ["Sigma Prime "] edition = "2021" autotests = false @@ -27,7 +27,7 @@ slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = { version = "2.1.1", features = ["json"] } types = { "path" = "../consensus/types" } bls = { path = "../crypto/bls" } -eth2_hashing = "0.2.0" +eth2_hashing = "0.3.0" clap = "2.33.3" env_logger = "0.9.0" environment = { path = "./environment" } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 91feef5b05..160f696542 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -13,9 +13,7 @@ use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; -use sloggers::{ - file::FileLoggerBuilder, null::NullLoggerBuilder, types::Format, types::Severity, Build, -}; +use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; use std::fs::create_dir_all; use std::path::PathBuf; use std::sync::Arc; @@ -33,6 +31,8 @@ use { #[cfg(not(target_family = "unix"))] use {futures::channel::oneshot, std::cell::RefCell}; +pub use task_executor::test_utils::null_logger; + const LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. 
const MAXIMUM_SHUTDOWN_TIME: u64 = 15; @@ -506,13 +506,6 @@ impl Environment { } } -pub fn null_logger() -> Result { - let log_builder = NullLoggerBuilder; - log_builder - .build() - .map_err(|e| format!("Failed to start null logger: {:?}", e)) -} - #[cfg(target_family = "unix")] struct SignalFuture { signal: Signal, diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 8424a2fdc3..33aa8ad165 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -72,8 +72,8 @@ CHURN_LIMIT_QUOTIENT: 65536 # Fork choice # --------------------------------------------------------------- -# 70% -PROPOSER_SCORE_BOOST: 70 +# 40% +PROPOSER_SCORE_BOOST: 40 # Deposit contract # --------------------------------------------------------------- diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 254acd4075..be87083763 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -488,8 +488,8 @@ fn run( return Ok(()); } - if let Some(sub_matches) = matches.subcommand_matches("database_manager") { - eprintln!("Running database manager for {} network", network_name); + if let Some(sub_matches) = matches.subcommand_matches(database_manager::CMD) { + info!(log, "Running database manager for {} network", network_name); // Pass the entire `environment` to the database manager so it can run blocking operations. 
database_manager::run(sub_matches, environment)?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4d596ce703..5748bbd341 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -108,6 +108,26 @@ fn disable_lock_timeouts_flag() { .with_config(|config| assert!(!config.chain.enable_lock_timeouts)); } +#[test] +fn fork_choice_before_proposal_timeout_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.fork_choice_before_proposal_timeout_ms, + beacon_node::beacon_chain::chain_config::DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT + ) + }); +} + +#[test] +fn fork_choice_before_proposal_timeout_zero() { + CommandLineTest::new() + .flag("fork-choice-before-proposal-timeout", Some("0")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.fork_choice_before_proposal_timeout_ms, 0)); +} + #[test] fn freezer_dir_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); @@ -804,6 +824,40 @@ fn slots_per_restore_point_flag() { .run_with_zero_port() .with_config(|config| assert_eq!(config.store.slots_per_restore_point, 64)); } +#[test] +fn slots_per_restore_point_update_prev_default() { + use beacon_node::beacon_chain::store::config::{ + DEFAULT_SLOTS_PER_RESTORE_POINT, PREV_DEFAULT_SLOTS_PER_RESTORE_POINT, + }; + + CommandLineTest::new() + .flag("slots-per-restore-point", Some("2048")) + .run_with_zero_port() + .with_config_and_dir(|config, dir| { + // Check that 2048 is the previous default. + assert_eq!( + config.store.slots_per_restore_point, + PREV_DEFAULT_SLOTS_PER_RESTORE_POINT + ); + + // Restart the BN with the same datadir and the new default SPRP. It should + // allow this. 
+ CommandLineTest::new() + .flag("datadir", Some(&dir.path().display().to_string())) + .flag("zero-ports", None) + .run_with_no_datadir() + .with_config(|config| { + // The dumped config will have the new default 8192 value, but the fact that + // the BN started and ran (with the same datadir) means that the override + // was successful. + assert_eq!( + config.store.slots_per_restore_point, + DEFAULT_SLOTS_PER_RESTORE_POINT + ); + }); + }) +} + #[test] fn block_cache_size_flag() { CommandLineTest::new() diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 0139b6624a..22b3408ab3 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -15,8 +15,8 @@ lighthouse_metrics = { path = "../common/lighthouse_metrics" } filesystem = { path = "../common/filesystem" } mdbx = { package = "libmdbx", version = "0.1.0" } lru = "0.7.1" -parking_lot = "0.11.0" -rand = "0.7.3" +parking_lot = "0.12.0" +rand = "0.8.5" safe_arith = { path = "../consensus/safe_arith" } serde = "1.0" serde_derive = "1.0" diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 7ff7fe5850..8126602f37 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -42,23 +42,23 @@ fn random_test(seed: u64, test_config: TestConfig) { let tempdir = tempdir().unwrap(); let mut config = Config::new(tempdir.path().into()); - config.validator_chunk_size = 1 << rng.gen_range(1, 4); + config.validator_chunk_size = 1 << rng.gen_range(1..4); - let chunk_size_exponent = rng.gen_range(1, 4); + let chunk_size_exponent = rng.gen_range(1..4); config.chunk_size = 1 << chunk_size_exponent; - config.history_length = 1 << rng.gen_range(chunk_size_exponent, chunk_size_exponent + 3); + config.history_length = 1 << rng.gen_range(chunk_size_exponent..chunk_size_exponent + 3); let slasher = Slasher::::open(config.clone(), test_logger()).unwrap(); let validators = (0..num_validators as u64).collect::>(); - let num_attestations = rng.gen_range(2, max_attestations + 1); + let num_attestations = 
rng.gen_range(2..max_attestations + 1); let mut current_epoch = Epoch::new(0); let mut attestations = vec![]; for _ in 0..num_attestations { - let num_attesters = rng.gen_range(1, num_validators); + let num_attesters = rng.gen_range(1..num_validators); let mut attesting_indices = validators .choose_multiple(&mut rng, num_attesters) .copied() @@ -70,17 +70,17 @@ fn random_test(seed: u64, test_config: TestConfig) { let source = rng.gen_range( current_epoch .as_u64() - .saturating_sub(config.history_length as u64 - 1), - current_epoch.as_u64() + 1, + .saturating_sub(config.history_length as u64 - 1) + ..current_epoch.as_u64() + 1, ); - let target = rng.gen_range(source, current_epoch.as_u64() + 1); + let target = rng.gen_range(source..current_epoch.as_u64() + 1); (source, target) } else { - let source = rng.gen_range(0, max(3 * current_epoch.as_u64(), 1)); - let target = rng.gen_range(source, max(3 * current_epoch.as_u64(), source + 1)); + let source = rng.gen_range(0..max(3 * current_epoch.as_u64(), 1)); + let target = rng.gen_range(source..max(3 * current_epoch.as_u64(), source + 1)); (source, target) }; - let target_root = rng.gen_range(0, 3); + let target_root = rng.gen_range(0..3); let attestation = indexed_att(&attesting_indices, source, target, target_root); if check_slashings { @@ -92,9 +92,9 @@ fn random_test(seed: u64, test_config: TestConfig) { // Maybe add a random block too if test_config.add_blocks && rng.gen_bool(0.1) { - let slot = rng.gen_range(0, 1 + 3 * current_epoch.as_u64() * E::slots_per_epoch() / 2); - let proposer = rng.gen_range(0, num_validators as u64); - let block_root = rng.gen_range(0, 2); + let slot = rng.gen_range(0..1 + 3 * current_epoch.as_u64() * E::slots_per_epoch() / 2); + let proposer = rng.gen_range(0..num_validators as u64); + let block_root = rng.gen_range(0..2); slasher.accept_block_header(block(slot, proposer, block_root)); } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs 
index 9744434f53..92c28aeb04 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -338,7 +338,11 @@ impl Tester { // function. if !valid { // A missing parent block whilst `valid == false` means the test should pass. - if let Some(parent_block) = self.harness.chain.get_block(&block.parent_root()).unwrap() + if let Some(parent_block) = self + .harness + .chain + .get_blinded_block(&block.parent_root()) + .unwrap() { let parent_state_root = parent_block.state_root(); let mut state = self diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 2c38d37597..82a2a12d61 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -20,8 +20,8 @@ use state_processing::{ use std::fmt::Debug; use std::path::Path; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, - ExecutionPayload, ForkName, ProposerSlashing, SignedVoluntaryExit, SyncAggregate, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, ForkName, + FullPayload, ProposerSlashing, SignedVoluntaryExit, SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -187,7 +187,13 @@ impl Operation for BeaconBlock { _: &Operations, ) -> Result<(), BlockProcessingError> { let mut ctxt = ConsensusContext::new(state.slot()); - process_block_header(state, self.to_ref(), VerifyBlockRoot::True, &mut ctxt, spec)?; + process_block_header( + state, + self.to_ref().temporary_block_header(), + VerifyBlockRoot::True, + &mut ctxt, + spec, + )?; Ok(()) } } @@ -220,7 +226,7 @@ impl Operation for SyncAggregate { } } -impl Operation for ExecutionPayload { +impl Operation for FullPayload { fn handler_name() -> String { "execution_payload".into() } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 4d068cb91f..540fe6903e 100644 --- a/testing/ef_tests/src/type_name.rs +++ 
b/testing/ef_tests/src/type_name.rs @@ -54,6 +54,7 @@ type_name!(DepositData); type_name!(DepositMessage); type_name!(Eth1Data); type_name_generic!(ExecutionPayload); +type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); type_name!(Fork); type_name!(ForkData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index e07e099b8f..8b415acc80 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -72,8 +72,8 @@ fn operations_sync_aggregate() { #[test] fn operations_execution_payload() { - OperationsHandler::>::default().run(); - OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); } #[test] diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index 57162f97b3..08766f14fc 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] tokio = { version = "1.14.0", features = ["time"] } -web3 = { version = "0.17.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } +web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } types = { path = "../../consensus/types"} serde_json = "1.0.58" deposit_contract = { path = "../../common/deposit_contract"} diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 1b80097cc8..52ae3922bc 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -194,6 +194,8 @@ impl DepositContract { to: Some(self.contract.address()), gas: Some(U256::from(DEPOSIT_GAS)), gas_price: None, + max_fee_per_gas: None, + max_priority_fee_per_gas: None, value: Some(from_gwei(deposit_data.amount)), // Note: the reason we use this `TransactionRequest` instead of just using the // function in `self.contract` is so that the `eth1_tx_data` function gets used diff 
--git a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs index 0de56fba3c..c0b94e22e8 100644 --- a/testing/execution_engine_integration/src/genesis_json.rs +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -43,7 +43,7 @@ pub fn geth_genesis_json() -> Value { /// Sourced from: /// -/// https://github.com/NethermindEth/nethermind/blob/themerge_kintsugi/src/Nethermind/Chains/themerge_kintsugi_m2.json +/// https://github.com/NethermindEth/nethermind/blob/kiln/src/Nethermind/Chains/themerge_kiln_testvectors.json pub fn nethermind_genesis_json() -> Value { json!({ "name": "TheMerge_Devnet", @@ -54,63 +54,63 @@ pub fn nethermind_genesis_json() -> Value { "epoch": 30000 } } - }, - "params": { - "gasLimitBoundDivisor": "0x400", - "accountStartNonce": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID": 1, - "eip150Transition": "0x0", - "eip155Transition": "0x0", - "eip158Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip140Transition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip145Transition": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip1283Transition": "0x0", - "eip1283DisableTransition": "0x0", - "eip152Transition": "0x0", - "eip1108Transition": "0x0", - "eip1344Transition": "0x0", - "eip1884Transition": "0x0", - "eip2028Transition": "0x0", - "eip2200Transition": "0x0", - "eip2565Transition": "0x0", - "eip2929Transition": "0x0", - "eip2930Transition": "0x0", - "eip1559Transition": "0x0", - "eip3198Transition": "0x0", - "eip3529Transition": "0x0", - "eip3541Transition": "0x0" - }, - "genesis": { - "seal": { - "ethereum": { - "nonce": "0x42", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - }, - "difficulty": "0x000000000", - "author": 
"0x0000000000000000000000000000000000000000", - "timestamp": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit":"0x1C9C380", - "author": "0x0000000000000000000000000000000000000000", - "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas":"0x7" - }, - "accounts": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance":"0x6d6172697573766477000000" - } - } + }, + "params": { + "gasLimitBoundDivisor": "0x400", + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID": 1, + "eip150Transition": "0x0", + "eip155Transition": "0x0", + "eip158Transition": "0x0", + "eip160Transition": "0x0", + "eip161abcTransition": "0x0", + "eip161dTransition": "0x0", + "eip140Transition": "0x0", + "eip211Transition": "0x0", + "eip214Transition": "0x0", + "eip658Transition": "0x0", + "eip145Transition": "0x0", + "eip1014Transition": "0x0", + "eip1052Transition": "0x0", + "eip1283Transition": "0x0", + "eip1283DisableTransition": "0x0", + "eip152Transition": "0x0", + "eip1108Transition": "0x0", + "eip1344Transition": "0x0", + "eip1884Transition": "0x0", + "eip2028Transition": "0x0", + "eip2200Transition": "0x0", + "eip2565Transition": "0x0", + "eip2929Transition": "0x0", + "eip2930Transition": "0x0", + "eip1559Transition": "0x0", + "eip3198Transition": "0x0", + "eip3529Transition": "0x0", + "eip3541Transition": "0x0" + }, + "genesis": { + "seal": { + "ethereum": { + "nonce": "0x42", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty": "0x400000000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x0", + 
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit":"0x1C9C380", + "author": "0x0000000000000000000000000000000000000000", + "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas":"0x7" + }, + "accounts": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance":"0x6d6172697573766477000000" + } + } }) } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index bc07cea343..7a6a3803e6 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -7,8 +7,8 @@ use std::{env, fs::File}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const GETH_BRANCH: &str = "merge-kiln-v2"; -const GETH_REPO_URL: &str = "https://github.com/MariusVanDerWijden/go-ethereum"; +const GETH_BRANCH: &str = "master"; +const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { Command::new("make") diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index b788a7565c..79661354de 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -5,7 +5,8 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::time::sleep; use types::{ - Address, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, Slot, Uint256, + Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, + MainnetEthSpec, Slot, Uint256, }; const EXECUTION_ENGINE_START_TIMEOUT: 
Duration = Duration::from_secs(10); @@ -171,7 +172,7 @@ impl TestRig { let valid_payload = self .ee_a .execution_layer - .get_payload::( + .get_payload::>( parent_hash, timestamp, prev_randao, @@ -179,7 +180,8 @@ impl TestRig { proposer_index, ) .await - .unwrap(); + .unwrap() + .execution_payload; /* * Execution Engine A: @@ -212,6 +214,7 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); + check_payload_reconstruction(&self.ee_a, &valid_payload).await; /* * Execution Engine A: @@ -262,7 +265,7 @@ impl TestRig { let second_payload = self .ee_a .execution_layer - .get_payload::( + .get_payload::>( parent_hash, timestamp, prev_randao, @@ -270,7 +273,8 @@ impl TestRig { proposer_index, ) .await - .unwrap(); + .unwrap() + .execution_payload; /* * Execution Engine A: @@ -285,6 +289,7 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); + check_payload_reconstruction(&self.ee_a, &second_payload).await; /* * Execution Engine A: @@ -356,6 +361,7 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); + check_payload_reconstruction(&self.ee_b, &valid_payload).await; /* * Execution Engine B: @@ -369,6 +375,7 @@ impl TestRig { .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); + check_payload_reconstruction(&self.ee_b, &second_payload).await; /* * Execution Engine B: @@ -389,6 +396,22 @@ impl TestRig { } } +/// Check that the given payload can be re-constructed by fetching it from the EE. +/// +/// Panic if payload reconstruction fails. +async fn check_payload_reconstruction( + ee: &ExecutionPair, + payload: &ExecutionPayload, +) { + let reconstructed = ee + .execution_layer + .get_payload_by_block_hash(payload.block_hash) + .await + .unwrap() + .unwrap(); + assert_eq!(reconstructed, *payload); +} + /// Returns the duration since the unix epoch. 
pub fn timestamp_now() -> u64 { SystemTime::now() diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index c2f435fabe..6770508435 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -10,7 +10,7 @@ edition = "2021" node_test_rig = { path = "../node_test_rig" } eth1 = {path = "../../beacon_node/eth1"} types = { path = "../../consensus/types" } -parking_lot = "0.11.0" +parking_lot = "0.12.0" futures = "0.3.7" tokio = "1.14.0" eth1_test_rig = { path = "../eth1_test_rig" } diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 3668cf0064..6cfc3e6db7 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -107,15 +107,16 @@ impl LocalNetwork { beacon_config.network.discv5_config.table_filter = |_| true; } - let mut write_lock = self_1.beacon_nodes.write(); - let index = write_lock.len(); - + // We create the beacon node without holding the lock, so that the lock isn't held + // across the await. This is only correct if this function never runs in parallel + // with itself (which at the time of writing, it does not). 
+ let index = self_1.beacon_nodes.read().len(); let beacon_node = LocalBeaconNode::production( self.context.service_context(format!("node_{}", index)), beacon_config, ) .await?; - write_lock.push(beacon_node); + self_1.beacon_nodes.write().push(beacon_node); Ok(()) } diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index e328938db1..3bb460c9fe 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -62,6 +62,9 @@ fn syncing_sim( let end_after_checks = true; let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor); + // Set fork epochs to test syncing across fork boundaries + spec.altair_fork_epoch = Some(Epoch::new(1)); + spec.bellatrix_fork_epoch = Some(Epoch::new(2)); spec.seconds_per_slot /= speed_up_factor; spec.seconds_per_slot = max(1, spec.seconds_per_slot); spec.eth1_follow_distance = 16; @@ -86,6 +89,8 @@ fn syncing_sim( beacon_config.dummy_eth1_backend = true; beacon_config.sync_eth1_chain = true; + beacon_config.http_api.allow_sync_stalled = true; + beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); // Generate the directories and keystores required for the validator clients. 
diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 128c4a6fe9..800f988654 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -36,7 +36,9 @@ mod tests { use types::*; use url::Url; use validator_client::{ - initialized_validators::{load_pem_certificate, InitializedValidators}, + initialized_validators::{ + load_pem_certificate, load_pkcs12_identity, InitializedValidators, + }, validator_store::ValidatorStore, SlashingDatabase, SLASHING_PROTECTION_FILENAME, }; @@ -108,7 +110,18 @@ mod tests { } fn root_certificate_path() -> PathBuf { - tls_dir().join("cert.pem") + tls_dir().join("lighthouse").join("web3signer.pem") + } + + fn client_identity_path() -> PathBuf { + tls_dir().join("lighthouse").join("key.p12") + } + + fn client_identity_password() -> String { + fs::read_to_string(tls_dir().join("lighthouse").join("password.txt")) + .unwrap() + .trim() + .to_string() } /// A testing rig which holds a live Web3Signer process. 
@@ -155,8 +168,9 @@ mod tests { File::create(&keystore_dir.path().join("key-config.yaml")).unwrap(); serde_yaml::to_writer(key_config_file, &key_config).unwrap(); - let tls_keystore_file = tls_dir().join("key.p12"); - let tls_keystore_password_file = tls_dir().join("password.txt"); + let tls_keystore_file = tls_dir().join("web3signer").join("key.p12"); + let tls_keystore_password_file = tls_dir().join("web3signer").join("password.txt"); + let tls_known_clients_file = tls_dir().join("web3signer").join("known_clients.txt"); let stdio = || { if SUPPRESS_WEB3SIGNER_LOGS { @@ -173,7 +187,10 @@ mod tests { )) .arg(format!("--http-listen-host={}", listen_address)) .arg(format!("--http-listen-port={}", listen_port)) - .arg("--tls-allow-any-client=true") + .arg(format!( + "--tls-known-clients-file={}", + tls_known_clients_file.to_str().unwrap() + )) .arg(format!( "--tls-keystore-file={}", tls_keystore_file.to_str().unwrap() @@ -193,8 +210,11 @@ mod tests { let url = Url::parse(&format!("https://{}:{}", listen_address, listen_port)).unwrap(); let certificate = load_pem_certificate(root_certificate_path()).unwrap(); + let identity = + load_pkcs12_identity(client_identity_path(), &client_identity_password()).unwrap(); let http_client = Client::builder() .add_root_certificate(certificate) + .identity(identity) .build() .unwrap(); @@ -358,6 +378,8 @@ mod tests { url: signer_rig.url.to_string(), root_certificate_path: Some(root_certificate_path()), request_timeout_ms: None, + client_identity_path: Some(client_identity_path()), + client_identity_password: Some(client_identity_password()), }, }; ValidatorStoreRig::new(vec![validator_definition], spec).await diff --git a/testing/web3signer_tests/tls/cert.pem b/testing/web3signer_tests/tls/cert.pem deleted file mode 100644 index 7f2d5f1f2c..0000000000 --- a/testing/web3signer_tests/tls/cert.pem +++ /dev/null @@ -1,32 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFmTCCA4GgAwIBAgIUd6yn4o1bKr2YpzTxcBmoiM4PorkwDQYJKoZIhvcNAQEL 
-BQAwajELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 -eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRIwEAYD -VQQDDAkxMjcuMC4wLjEwIBcNMjEwOTA2MDgxMDU2WhgPMjEyMTA4MTMwODEwNTZa -MGoxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTERMA8GA1UEBwwIU29tZUNpdHkx -EjAQBgNVBAoMCU15Q29tcGFueTETMBEGA1UECwwKTXlEaXZpc2lvbjESMBAGA1UE -AwwJMTI3LjAuMC4xMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAx/a1 -SRqehj/D18166GcJh/zOyDtZCbeoLWcVfS1aBq+J1FFy4LYKWgwNhOYsrxHLhsIr -/LpHpRm/FFqLPxGNoEPMcJi1dLcELPcJAG1l+B0Ur52V/nxOmzn71Mi0WQv0oOFx -hOtUOToY3heVW0JXgrILhdD834mWdsxBWPhq1LeLZcMth4woMgD9AH4KzxUNtFvo -8i8IneEYvoDIQ8dGZ5lHnFV5kaC8Is0hevMljTw83E9BD0B/bpp+o2rByccVulsy -/WK763tFteDxK5eZZ3/5rRId+uoN5+D4oRnG6zuki0t7+eTZo1cUPi28IIDTNjPR -Xvw35dt+SdTDjtI/FUf8VWhLIHZZXaevFliuBbcuOMpWCdjAdwb7Uf9WpMnxzZtK -fatAC9dk3VPsehFcf6w/H+ah3tu/szAaDJ5zZb0m05cAxDZekZ9SccBIPglccM3f -vzNjrDIoi4z7uCiTJc2FW0qb2MzusQsGjtLW53n7IGoSIFDvOhiZa9D+vOE2wG6o -VNf2K9/QvwNDCzRvW81mcUCRr/BhcAmX5drwYPwUEcdBXQeFPt6nZ33fmIgl2Cbv -io9kUJzjlQWOZ6BX5FmC69dWAedcfHGY693tG6LQKk9a5B+NiuIB4m1bHcvjYhsh -GqVrw980YIN52RmIoskGRdt34/gKHWcqjIEK0+kCAwEAAaM1MDMwCwYDVR0PBAQD -AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZI -hvcNAQELBQADggIBAILVu5ppYnumyxvchgSLAi/ahBZV/wmtI3X8vxOHuQwYF8rZ -7b2gd+PClJBuhxeOEJZTtCSDMMUdlBXsxnoftp0TcDhFXeAlSp0JQe38qGAlX94l -4ZH39g+Ut5kVpImb/nI/iQhdOSDzQHaivTMjhNlBW+0EqvVJ1YsjjovtcxXh8gbv -4lKpGkuT6xVRrSGsZh0LQiVtngKNqte8vBvFWBQfj9JFyoYmpSvYl/LaYjYkmCya -V2FbfrhDXDI0IereknqMKDs8rF4Ik6i22b+uG91yyJsRFh63x7agEngpoxYKYV6V -5YXIzH5kLX8hklHnLgVhES2ZjhheDgC8pCRUCPqR4+KVnQcFRHP9MJCqcEIFAppD -oHITdiFDs/qE0EDV9WW1iOWgBmdgxUZ8dh1CfW+7B72+Uy0/eXWdnlrRDe5cN/hs -xXpnLCMfzSDEMA4WmImabpU/fRXL7pazZENJj7iyIAr/pEL34+QjqVfWaXkWrHoN -KsrkxTdoZNVdarBDSw9JtMUECmnWYOjMaOm1O8waib9H1SlPSSPrK5pGT/6h1g0d -LM982X36Ej8XyW33E5l6qWiLVRye7SaAvZbVLsyd+cfemi6BPsK+y09eCs4a+Qp7 -9YWZOPT6s/ahJYdTGF961JZ62ypIioimW6wx8hAMCkKKfhn1WI0+0RlOrjbw ------END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/generate.sh 
b/testing/web3signer_tests/tls/generate.sh index 1e45bb61b5..f00e7b7e37 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,4 +1,7 @@ #!/bin/bash -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout key.key -out cert.pem -config config && -openssl pkcs12 -export -out key.p12 -inkey key.key -in cert.pem -password pass:$(cat password.txt) - +openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && +openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && +cp web3signer/cert.pem lighthouse/web3signer.pem && +openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && +openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/key.key b/testing/web3signer_tests/tls/key.key deleted file mode 100644 index 6f1331db1a..0000000000 --- a/testing/web3signer_tests/tls/key.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDH9rVJGp6GP8PX -zXroZwmH/M7IO1kJt6gtZxV9LVoGr4nUUXLgtgpaDA2E5iyvEcuGwiv8ukelGb8U -Wos/EY2gQ8xwmLV0twQs9wkAbWX4HRSvnZX+fE6bOfvUyLRZC/Sg4XGE61Q5Ohje -F5VbQleCsguF0PzfiZZ2zEFY+GrUt4tlwy2HjCgyAP0AfgrPFQ20W+jyLwid4Ri+ -gMhDx0ZnmUecVXmRoLwizSF68yWNPDzcT0EPQH9umn6jasHJxxW6WzL9Yrvre0W1 -4PErl5lnf/mtEh366g3n4PihGcbrO6SLS3v55NmjVxQ+LbwggNM2M9Fe/Dfl235J -1MOO0j8VR/xVaEsgdlldp68WWK4Fty44ylYJ2MB3BvtR/1akyfHNm0p9q0AL12Td -U+x6EVx/rD8f5qHe27+zMBoMnnNlvSbTlwDENl6Rn1JxwEg+CVxwzd+/M2OsMiiL 
-jPu4KJMlzYVbSpvYzO6xCwaO0tbnefsgahIgUO86GJlr0P684TbAbqhU1/Yr39C/ -A0MLNG9bzWZxQJGv8GFwCZfl2vBg/BQRx0FdB4U+3qdnfd+YiCXYJu+Kj2RQnOOV -BY5noFfkWYLr11YB51x8cZjr3e0botAqT1rkH42K4gHibVsdy+NiGyEapWvD3zRg -g3nZGYiiyQZF23fj+AodZyqMgQrT6QIDAQABAoICAGMICuZGmaXxJIPXDvzUMsM3 -cA14XvNSEqdRuzHAaSqQexk8sUEaxuurtnJQMGcP0BVQSsqiUuMwahKheP7mKZbq -nPBSoONJ1HaUbc/ZXjvP4zPKPsPHOoLj55WNRMwpAKFApaDnj1G8NR6g3WZR59ch -aFWAmAv5LxxsshxnAzmQIShnzj+oKSwCk0pQIfhG+/+L2UVAB+tw1HlcfFIc+gBK -yE1jg46c5S/zGZaznrBg2d9eHOF51uKm/vrd31WYFGmzyv/0iw7ngTG/UpF9Rgsd -NUECjPh8PCDPqTLX+kz7v9UAsEiljye2856LtfT++BuK9DEvhlt/Jf9YsPUlqPl3 -3wUG8yiqBQrlGTUY1KUdHsulmbTiq4Q9ch5QLcvazk+9c7hlB6WP+/ofqgIPSlDt -fOHkROmO7GURz78lVM8+E/pRgy6qDq+yM1uVMeWWme4hKfOAL2lnJDTO4PKNQA4b -03YXsdVSz4mm9ppnyHIPXei6/qHpU/cRRf261HNEI16eC0ZnoIAxhORJtxo6kMns -am4yuhHm9qLjbOI1uJPAgpR/o0O5NaBgkdEzJ102pmv2grf2U743n9bqu+y/vJF9 -HRmMDdJgZSmcYxQuLe0INzLDnTzOdmjbqjB6lDsSwtrEo/KLtXIStrFMKSHIE/QV -96u8nWPomN83HqkVvQmBAoIBAQDrs8eKAQ3meWtmsSqlzCNVAsJA1xV4DtNaWBTz -MJXwRWywem/sHCoPsJ7c5UTUjQDOfNEUu8iW/m60dt0U+81/O9TLBP1Td6jxLg8X -92atLs8wHQDUqrgouce0lyS7to+R3K+N8YtWL2y9w9jbf/XT9iTL5TXGc8RFrmMg -nDQ1EShojU0U0I1lKpDJTx2R1FANfyd3iHSsENRwYj5MF8iQSag79Ek06BKLWHHt -OJj2oiO3VIAKQYVA9aKxfiiOWXWumPHq7r6UoNJK3UNzfBvguhEzl8k6VjZBCR9q -WwvSTba4mOgHMIXdV/9Wr3y8Cus2lX5YGOK4OUx/ZaCdaBtZAoIBAQDZLwwZDHen -Iw1412m/D/6HBS38bX78t+0hL7LNqgVpiZdNbLq57SGRbUnZZ/jlmtyLw3be6BV3 -IcLyflYW+4Wi8AAqVADlXjMC+GIuDNCCicwWxJeIFaAGM7Jt6Fa08H/loIAMM7NC -y1CmQnCR9OnHRdcBaU1y4ForP4f8B/hwh3hSQEFPKgF/MQwDnR7UzPgRrUOTovN/ -4D7j1Wx6FpYX9hGZL0i2K1ygRZE03t6VV7xhCkne96VvDEj1Zo/S4HFaEmDD+EjR -pvXVhPRed7GZ6AMs2JxOPhRiu3G+AQL1HPMDlA8QiPtTh0Zf99j/5NXKBEyH/fp1 -V04L1s7wf7sRAoIBAQCb3/ftJ0dXDSNe9Xl7ziXrmXh3wwYasMtLawbn0VDHZlI7 -36zW28VhPO/CrAi5/En1RIxNBubgHIF/7T/GGcRMCXhvjuwtX+wlG821jtKjY1p3 -uiaLfh9uJ3aP0ojjbxdBYk3jNENuisyCLtviRZyAQb8R7JKEnJjHcE10CnloQuGT -SycXxdhMeDrqNt0aTOtoEZg7L83g4PxtGjuSvQPRkDSm+aXUTEm/R42IUS6vpIi0 -PDi1D6GdVRT0BrexdC4kelc6hAsbZcPM6MkrvX7+Pm8TzKSyZMNafTr+bhnCScy2 
-BcEkyA0vVXuyizmVbi8hmPnGLyb4qEQT2FTA5FF5AoIBAQCEj0vCCjMKB8IUTN7V -aGzBeq7b0PVeSODqjZOEJk9RYFLCRigejZccjWky0lw/wGr2v6JRYbSgVzIHEod3 -VaP2lKh1LXqyhPF70aETXGz0EClKiEm5HQHkZy90GAi8PcLCpFkjmXbDwRcDs6/D -1onOQFmAGgbUpA1FMmzMrwy7mmQdR+zU5d2uBYDAv+jumACdwXRqq14WYgfgxgaE -6j5Id7+8EPk/f230wSFk9NdErh1j2YTHG76U7hml9yi33JgzEt6PHn9Lv61y2sjQ -1BvJxawSdk/JDekhbil5gGKOu1G0kG01eXZ1QC77Kmr/nWvD9yXDJ4j0kAop/b2n -Wz8RAoIBAQDn1ZZGOJuVRUoql2A65zwtu34IrYD+2zQQCBf2hGHtwXT6ovqRFqPV -vcQ7KJP+zVT4GimFlZy7lUx8H4j7+/Bxn+PpUHHoDYjVURr12wk2w8pxwcKnbiIw -qaMkF5KG2IUVb7F8STEuKv4KKeuRlB4K2HC2J8GZOLXO21iOqNMhMRO11wp9jkKI -n83wtLH34lLRz4VzIW3rfvPeVoP1zoDkLvD8k/Oyjrf4Bishg9vCHyhQkB1JDtMU -1bfH8mxwKozakpJa23a8lE5NLoc9NOZrKM4+cefY1MZ3FjlaZfkS5jlhY4Qhx+fl -+9j5xRPaH+mkJHaJIqzQad+b1A2eIa+L ------END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/key.p12 b/testing/web3signer_tests/tls/key.p12 deleted file mode 100644 index 2f19e57f02..0000000000 Binary files a/testing/web3signer_tests/tls/key.p12 and /dev/null differ diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem new file mode 100644 index 0000000000..061b0e3cd7 --- /dev/null +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFmzCCA4OgAwIBAgIUXpTV/0rd/GAoCfCyzPOtwcb4t7YwDQYJKoZIhvcNAQEL +BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 +eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD +VQQDDApsaWdodGhvdXNlMCAXDTIyMDUxMTEzNDEwOFoYDzIxMjIwNDE3MTM0MTA4 +WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 +MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV +BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC0 +HrD6fJGcqm8zwEs+Y+FGIpRYPyjdlugj3qqwvMSI9jeDW2fr1zUl/wIuf4o+O16P +XZitHgAyg3lph1x/kKL59c4rwWxUabSudAQZ6YCJHo4jWf3hR+UmMQEdNPgNrofv +vGCA7CjLPKZfW6pzZo9kvMwbgeRNuJCuKZ0v/p9Y/lOplj+TTBq16HMtsSarib3b 
+nKEaRdLCQgTJS3vwbtEiCC9BcZAkvs0fmVUIENRVeKGZIqcAdiOTUPvs4zctchzJ +MGG+TA2ckKIpGT0F4be8gy1uHyP0fncJAtNvkGRPmVQcNew/HIIkJjiJvmrwewn4 +dYqYAe+aEL5AB4dZhlKjIPENfq38t7iY/aXV8COTQZGMEZ7Diext1JmEb34vEXgS +7Gk9ZSCp/1X+fk/wW4uQeRlGwblaRtRxBrfJWmEoQHohzyP4jog8dajSZTjUbsA+ +HGaeZo1k3M0i3lxRBbLGamPODIO9CVGwKaiEJTy4bEpreM2tLR1rk5JECf46WPUR +SN6OdHrO5x38wzQlUv+Hb4vN4p0ZkiGJO62Duuw6hbGA6UIBffM20QuJUtz3Pa8D +un/NunIagmIL5KCsrDtZkt5wBsX3XU6OPdfZrfgOIXNfQmpbbeAUOok1NOgszXjP +DKCsnxZZBtPhXC1VnRkiWK50GNmWe8MLqXR/G12TXwIDAQABozUwMzALBgNVHQ8E +BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkq +hkiG9w0BAQsFAAOCAgEAcCGqC1nhjDiuF87LgzwuKdMV4NEpTGHa8aHrta/UrzO3 +Lf7fcQvB83tR3ZFk9ndlnDbTVr0seAqDDdJxUHyaA3lX6F5g8G6W8bm76w8b5vot +Vl4ohfcA0CIxbCpp773V0qjyZNj9wDIZg8cX8mXcRi4XoUDltD5/yUwRLVjjvJba +tF+vD3NWWuCGRu65qdR3JYJGr4MtbVo06uoeBXcgZrcDsb93chlsuyH337twq2fn +QbqHbuyxAjFxtv125Jmu6li3pu9FUQrnQWQVHzvt2zvR44vOx+yDQHtil9U7H0aU +Nrzqr9OPOApCr7oQ8GoHYn4C7TAs12U/xiPsvuM1puTzbw8ofuKczFRIA8nuyUHU +XTP/9oYyZ/Vs9qyAtIVCCyEfhSobfwZLLFAT4RWzQZ4H0JmtXfNdt+PFPSWg5MZA +W321uulq/JSa4MQUJbNUEeNYeG+NqjhviM00irpt2Baz2EbVAJMT4ClndRQOwrKT +15+icdyvgx5uZbEuvXK6kyU0AHESHxhzN6C5eHPEYkMjVYgftbE7R3cp9TEj3VvK +Ecd1SXTtKOq2J91te10UrceURqquGuGXVUO7PYGVYBNugjlH47qRIwtI0njPg3ep +10XBwkOm1CgvZxHaj4P0NJf+wih+K8Z5Dg1+90nnJ4mxGFFIW8m7Cfn1tPFmEPo= +-----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/config b/testing/web3signer_tests/tls/lighthouse/config similarity index 95% rename from testing/web3signer_tests/tls/config rename to testing/web3signer_tests/tls/lighthouse/config index d19a89b02f..6295f7fa01 100644 --- a/testing/web3signer_tests/tls/config +++ b/testing/web3signer_tests/tls/lighthouse/config @@ -10,7 +10,7 @@ ST = VA L = SomeCity O = MyCompany OU = MyDivision -CN = 127.0.0.1 +CN = lighthouse [v3_req] keyUsage = keyEncipherment, dataEncipherment extendedKeyUsage = serverAuth diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key 
new file mode 100644 index 0000000000..bbc69ca38b --- /dev/null +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC0HrD6fJGcqm8z +wEs+Y+FGIpRYPyjdlugj3qqwvMSI9jeDW2fr1zUl/wIuf4o+O16PXZitHgAyg3lp +h1x/kKL59c4rwWxUabSudAQZ6YCJHo4jWf3hR+UmMQEdNPgNrofvvGCA7CjLPKZf +W6pzZo9kvMwbgeRNuJCuKZ0v/p9Y/lOplj+TTBq16HMtsSarib3bnKEaRdLCQgTJ +S3vwbtEiCC9BcZAkvs0fmVUIENRVeKGZIqcAdiOTUPvs4zctchzJMGG+TA2ckKIp +GT0F4be8gy1uHyP0fncJAtNvkGRPmVQcNew/HIIkJjiJvmrwewn4dYqYAe+aEL5A +B4dZhlKjIPENfq38t7iY/aXV8COTQZGMEZ7Diext1JmEb34vEXgS7Gk9ZSCp/1X+ +fk/wW4uQeRlGwblaRtRxBrfJWmEoQHohzyP4jog8dajSZTjUbsA+HGaeZo1k3M0i +3lxRBbLGamPODIO9CVGwKaiEJTy4bEpreM2tLR1rk5JECf46WPURSN6OdHrO5x38 +wzQlUv+Hb4vN4p0ZkiGJO62Duuw6hbGA6UIBffM20QuJUtz3Pa8Dun/NunIagmIL +5KCsrDtZkt5wBsX3XU6OPdfZrfgOIXNfQmpbbeAUOok1NOgszXjPDKCsnxZZBtPh +XC1VnRkiWK50GNmWe8MLqXR/G12TXwIDAQABAoICAQCXUo2W856Vwy5HiQ7t7JWv +CZAdj3pyp7yBnilC8GQhONGsntdw8M2rDVG05Nusqs4nnheNoX3C8mfHO7x/Q3FY +lKTQZ+DuDhyIz9k+N8kP6ca6dnlvkao3asYn1n9rZyy3QUjGJyGilWKlDGroJsrj +dCX6GidHEH8kgruXPdB7wLdi62KgCjkKiK5zPbhiNwd1gGJsoyqMn1BMGQmYFlHG +yJ+C2Lij1lSYboZcj18EK6N/9vfc0GPU+R2dh8qseIkskWQcruJknbJO2vBEh7yI +OKCrOqhHWRQCUwh1WxabNRLP3JGM+BNx8VZgisRnIsdeoMl+KWo1wklDm8+fa9Tx +4xquIy+4PzmobWXiWBpirF7bTNhyZ4vIaMSTOP5TYiliom/hJtcpAwLf9eXxMfti +vRAogZEtr0eKTieH72dwsBVx6wNlxhazvD+ZKIq7OIzJRA6Do2H+BAmz/l4mgVR/ +geL3u0fn0j/Y+8OyFE3P+8D/PqgPzLgTYa5QSp6JtHxNlVcmWefJiLtZDAJvPpeo +UVsA+E2BHsrGveLk15GF9F+vJ867qKT7luQac3zF7V0hE9pktUKM2gY+Jy455w5i +cMxyjt4RAKY8AHAmFvCRQHNdjU2o1UjVFgYsQTYsOdvAiyq0xEJFkbeR2Zxz2sJW +JWK+YlT+UEGDL5SCaXzP4QKCAQEA7gRAy/Xq0Fjq7UZvc7oJ62h6BmseFL9BuKlW +QmvVFAilYeQVejl/ubafyL4Z9ntEeCGTkv8H4DeALs9A/isFOcDxZDoelCETrSxI +CfXllob24276eTc5dBdHmofBjRgIbovnyuFRYzK5uDalVAxYsZPFOp9/qtGa25ex +uIcyJwX+ivqqtA9B5CHu7p/znNrp155xLwGpVczx4xGqjPPr5N2rwZFOXufGFULH +AKbJBSUxiMMJnb1rN8aIuTo/Utr3/i7hc7AUO3//qieyjLdXe8tESqgxzTNvfZk3 
+qYtPk4GSHql7Eesxg19fzVdG+LTnzfRKOfOtcZJPRFGGW29fjwKCAQEAwbqXsZvC +7AmmmeVVAPL7q5pXAxSEMK7VsJzPJ7G6MRQ37YjkNRcCf7SRQqNBGQubVkv3Qzvc +rmMhT9I5QfCR2JXQtrH1y09eS45T6NYbRkT6NA3E3XNmRIPO+wIeDV32v5jJwhIk +7ayuG2zBsAryxNvg3us3pWHeIQ45sX0JqNil6BTemYRBrCZmCRWHndl72zDbtR23 +kVt9GKaycSPyCZQ7yE4ZWD2VsrbgEidVJEQagknsjQrldMO68GLbHCP2ZyrIUhKN +2eeuHJpZPz+pahQ55MAEvjIsJKPWsg8cut2Vo4sqgez+xiz0v/nWiPLtvxdN+DHP +tAVbrw+0NeqnMQKCAQB3GsO+DLpLNiOhRpzhAViTZ32glpu/8BEYMgzLQiCnXMg9 +myAwQHOs4DlG//IICJkzsEGjzmEHj15iji3MwoRj6SwiZn8EyySIhN8rtNQFplYH +a3KFk9/5OukG6CYvz7Xwc6wzNts+U5TiHN5Ql7kOa47HjicZuLfQaTFy0JyFMJe2 +vkcLwZLMcTqaSIpklJtt3Yhv6FnvaJYmdaGt1SXXKiIXw/m+via+XuMsbUmsfHc0 +I709JRtxFrU2U3J6qL5ugNEqzhLhz2SFpkXP6rMpbIcpAM+jCrkg1bon6mGQw8b1 +9wNx7Qqi3egX3jPSotxYkIVQSKMjcP6fhlhAixP7AoIBAH1ynKQwHurF3RIuxPqW +XY3jpZCjCm6T6GAzSpmDpvP9CbJRQKV4Pu//N0kVeiQDthUNoBHzg5WRL5MGqHkg +lPDRIpQLbQS4YnE+uus9KfA43mQyvlZAUerwB2nXFyrEu/GZuJxpL2yQszWjGVEr +5cTANT9kxWXcmACDu6xJMaYalGRSj0qNsBEP1GbxgB4hJOjtHHiNw77mpXz/BPHq +uuKlEIlGuXbAel19ul9HBQU07I2N3RYABlG0JStgeE4io35u38T1qtF+CusOr9gb +G1NLwal1Bh07VAZt6arnykzfC/UZOu9jTh96IQrnd5q65GUnbB/Z8Yu7JIGaA7Ie +PyECggEAPZlzqPCdNcmdoCSNIDCDYZBVf2xZX8591xdphMG59Jrckp5kl5LM5bjQ +tysj1LJpMK+l60b3r8BI8a4lvj+eBqwBUck82/IImTedE9/oLF3Z64kLd1tr3aGa +W5jLXjThFF20BqfD+YbmFVEdHTwN2L+4kN0VvP/6oLadxogTLwQruMFoPlsD4B19 +HDcAKe6OnyWMer/X9nq9OY6GFGc4X6wHjJ8pj4aa4HE8VNNq40GMkRZOZaJvaPqh +orK9SC50qdJtrVQeD4fhfZMVzmRyE4RSSQBPfc9zq/sO/pjUfV9uK4c99FDbviIf +JAkxGuYLZeyrHEyeKLm7S77SLipKWg== +-----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 new file mode 100644 index 0000000000..22b7d7f425 Binary files /dev/null and b/testing/web3signer_tests/tls/lighthouse/key.p12 differ diff --git a/testing/web3signer_tests/tls/lighthouse/password.txt b/testing/web3signer_tests/tls/lighthouse/password.txt new file mode 100644 index 0000000000..16da1460ff --- /dev/null +++ 
b/testing/web3signer_tests/tls/lighthouse/password.txt @@ -0,0 +1 @@ +bark diff --git a/testing/web3signer_tests/tls/lighthouse/web3signer.pem b/testing/web3signer_tests/tls/lighthouse/web3signer.pem new file mode 100644 index 0000000000..460cb8b400 --- /dev/null +++ b/testing/web3signer_tests/tls/lighthouse/web3signer.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFmzCCA4OgAwIBAgIUSHwf3lJKpa1BNR9rFOmxhoKTD1MwDQYJKoZIhvcNAQEL +BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 +eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD +VQQDDAp3ZWIzc2lnbmVyMCAXDTIyMDUxMTEzNDEwOFoYDzIxMjIwNDE3MTM0MTA4 +WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 +MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDr +aQUU4O7K/aBAiH86RV3ye/Q7vguwplUNku317chzyFdB+OnGSUga6+zjdUmr8+49 +nki1q0rLEU/xJ0NpffTdzFgk1nk6Jh7Ly26q18SNpwpuwdvbajnTeh+BPSWZQL85 +xfO9th/RkJkgpzKukxK/npjvU6PbwiufSWI7mXNIgR0lIIacFXZ4RsD1PxZo/07k +toF0N+yLGW76yfeINRw43bG1MQxklePsk6zAUqJEi0tZmXqzh1NZHH5Q1VAEKKPW +yAVTDi3bWmvh3iSfgmckesjwUHANFeMhLpdiVTOi31OaILpx9HGRYYnqjW1AUZLo +SMKkyPsm6IN60GpAVI7TP3URVpTPPW78UeEUyeYN06tABYJsFWGFChg9Hf2yvcZU +2DDGdHpxut6h4WAwx9oL5rG4VSxFjhVi6ty3Hb9B0YFE/WNfV07wWPSQADZSK/kt +fhE+8zavQzjsxm2f1Ko5L/x8cIc5MS1xyaXn/UkoqH3QdWZC1aLs9NCl4F8ZE06g +jjvN9WdsCXmTEShqaXoRsZG7SfcQsu4gUUZ/fjbJ5hRf+QxMMKv42SUpqsRhslEF +/Pqu0WQd82CgG1a7XnfUO8BYSchTJZL55vx40ZZuQAu/ULsF7toa0lktijBxCPn3 +8HEnyLEyA3e8a93P0myWoxFn/fUpegT3TVSv33anqwIDAQABozUwMzALBgNVHQ8E +BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkq +hkiG9w0BAQsFAAOCAgEA1Bn7mpa2eJUo4+1X5lVLWWwtXLAfKiBf6OWNfacLV6FL +gyKpvvESTGuA5VAS0O97TPd7uyzEbUMS75TdmfAT8zecO2aXMb7aTyX+QbMj2gmk +zou72Fl4o6V1IvYpjKaNBZCS3Hk67ivRYbQCamEOk5UX9/wCdLvC9PH5Y+WqcPaz +7RLXe3OXhRbfFax4+pWzZxsgSKrEi8ZZ5gRa/bdJVVsTqk9LwS/CbMjEAkdzIBLt +cQb9BcnTJcQvp6ehNIVMdEC7GLXcDkefw7CL1ZfEh3DoJD3hiR6QwdWtdG0etoUf 
+w8LHZhCJD0IZxLMHiE+qiN4xkx+cznol+gAc9sfmtVK1CAW9l1Aa8zw5AfAyCg3h +jr6ymfwY8zlO21yBmCTg2+yTbU/0CqkgimQeztoYCh7+67QgnSCJMk2ffR6GPj1q +pfLI/5QNoxdFvR/lkwj5h/HRp9JZKTV/R/g0Va4Arg3Y7RTezjCYkJnX37ScnQhg +JLIeXmksFkc+Oz3yA+r60rR72+lsVzE87BCs+L0y16zcQnU5NqJXrSMMqCkjbs9l +b682+tnJKLFGQrYia/FL/Sc2L2Tn5hba5wWQTMjGujg76fkMc6VIv1qG3VGR/V1G +r11UJ+WjEcdrwZUm7E76p9DfTce52kGqGXwfrv6kQjvLhipwjzgv429txzDy82k= +-----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/cert.pem b/testing/web3signer_tests/tls/web3signer/cert.pem new file mode 100644 index 0000000000..460cb8b400 --- /dev/null +++ b/testing/web3signer_tests/tls/web3signer/cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFmzCCA4OgAwIBAgIUSHwf3lJKpa1BNR9rFOmxhoKTD1MwDQYJKoZIhvcNAQEL +BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 +eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD +VQQDDAp3ZWIzc2lnbmVyMCAXDTIyMDUxMTEzNDEwOFoYDzIxMjIwNDE3MTM0MTA4 +WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 +MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV +BAMMCndlYjNzaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDr +aQUU4O7K/aBAiH86RV3ye/Q7vguwplUNku317chzyFdB+OnGSUga6+zjdUmr8+49 +nki1q0rLEU/xJ0NpffTdzFgk1nk6Jh7Ly26q18SNpwpuwdvbajnTeh+BPSWZQL85 +xfO9th/RkJkgpzKukxK/npjvU6PbwiufSWI7mXNIgR0lIIacFXZ4RsD1PxZo/07k +toF0N+yLGW76yfeINRw43bG1MQxklePsk6zAUqJEi0tZmXqzh1NZHH5Q1VAEKKPW +yAVTDi3bWmvh3iSfgmckesjwUHANFeMhLpdiVTOi31OaILpx9HGRYYnqjW1AUZLo +SMKkyPsm6IN60GpAVI7TP3URVpTPPW78UeEUyeYN06tABYJsFWGFChg9Hf2yvcZU +2DDGdHpxut6h4WAwx9oL5rG4VSxFjhVi6ty3Hb9B0YFE/WNfV07wWPSQADZSK/kt +fhE+8zavQzjsxm2f1Ko5L/x8cIc5MS1xyaXn/UkoqH3QdWZC1aLs9NCl4F8ZE06g +jjvN9WdsCXmTEShqaXoRsZG7SfcQsu4gUUZ/fjbJ5hRf+QxMMKv42SUpqsRhslEF +/Pqu0WQd82CgG1a7XnfUO8BYSchTJZL55vx40ZZuQAu/ULsF7toa0lktijBxCPn3 +8HEnyLEyA3e8a93P0myWoxFn/fUpegT3TVSv33anqwIDAQABozUwMzALBgNVHQ8E +BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATANBgkq 
+hkiG9w0BAQsFAAOCAgEA1Bn7mpa2eJUo4+1X5lVLWWwtXLAfKiBf6OWNfacLV6FL +gyKpvvESTGuA5VAS0O97TPd7uyzEbUMS75TdmfAT8zecO2aXMb7aTyX+QbMj2gmk +zou72Fl4o6V1IvYpjKaNBZCS3Hk67ivRYbQCamEOk5UX9/wCdLvC9PH5Y+WqcPaz +7RLXe3OXhRbfFax4+pWzZxsgSKrEi8ZZ5gRa/bdJVVsTqk9LwS/CbMjEAkdzIBLt +cQb9BcnTJcQvp6ehNIVMdEC7GLXcDkefw7CL1ZfEh3DoJD3hiR6QwdWtdG0etoUf +w8LHZhCJD0IZxLMHiE+qiN4xkx+cznol+gAc9sfmtVK1CAW9l1Aa8zw5AfAyCg3h +jr6ymfwY8zlO21yBmCTg2+yTbU/0CqkgimQeztoYCh7+67QgnSCJMk2ffR6GPj1q +pfLI/5QNoxdFvR/lkwj5h/HRp9JZKTV/R/g0Va4Arg3Y7RTezjCYkJnX37ScnQhg +JLIeXmksFkc+Oz3yA+r60rR72+lsVzE87BCs+L0y16zcQnU5NqJXrSMMqCkjbs9l +b682+tnJKLFGQrYia/FL/Sc2L2Tn5hba5wWQTMjGujg76fkMc6VIv1qG3VGR/V1G +r11UJ+WjEcdrwZUm7E76p9DfTce52kGqGXwfrv6kQjvLhipwjzgv429txzDy82k= +-----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/web3signer/config b/testing/web3signer_tests/tls/web3signer/config new file mode 100644 index 0000000000..4b7e40618c --- /dev/null +++ b/testing/web3signer_tests/tls/web3signer/config @@ -0,0 +1,19 @@ +[req] +default_bits = 4096 +default_md = sha256 +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no +[req_distinguished_name] +C = US +ST = VA +L = SomeCity +O = MyCompany +OU = MyDivision +CN = web3signer +[v3_req] +keyUsage = keyEncipherment, dataEncipherment +extendedKeyUsage = serverAuth +subjectAltName = @alt_names +[alt_names] +IP.1 = 127.0.0.1 diff --git a/testing/web3signer_tests/tls/web3signer/key.key b/testing/web3signer_tests/tls/web3signer/key.key new file mode 100644 index 0000000000..6e5171f374 --- /dev/null +++ b/testing/web3signer_tests/tls/web3signer/key.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJRQIBADANBgkqhkiG9w0BAQEFAASCCS8wggkrAgEAAoICAQDraQUU4O7K/aBA +iH86RV3ye/Q7vguwplUNku317chzyFdB+OnGSUga6+zjdUmr8+49nki1q0rLEU/x +J0NpffTdzFgk1nk6Jh7Ly26q18SNpwpuwdvbajnTeh+BPSWZQL85xfO9th/RkJkg +pzKukxK/npjvU6PbwiufSWI7mXNIgR0lIIacFXZ4RsD1PxZo/07ktoF0N+yLGW76 +yfeINRw43bG1MQxklePsk6zAUqJEi0tZmXqzh1NZHH5Q1VAEKKPWyAVTDi3bWmvh 
+3iSfgmckesjwUHANFeMhLpdiVTOi31OaILpx9HGRYYnqjW1AUZLoSMKkyPsm6IN6 +0GpAVI7TP3URVpTPPW78UeEUyeYN06tABYJsFWGFChg9Hf2yvcZU2DDGdHpxut6h +4WAwx9oL5rG4VSxFjhVi6ty3Hb9B0YFE/WNfV07wWPSQADZSK/ktfhE+8zavQzjs +xm2f1Ko5L/x8cIc5MS1xyaXn/UkoqH3QdWZC1aLs9NCl4F8ZE06gjjvN9WdsCXmT +EShqaXoRsZG7SfcQsu4gUUZ/fjbJ5hRf+QxMMKv42SUpqsRhslEF/Pqu0WQd82Cg +G1a7XnfUO8BYSchTJZL55vx40ZZuQAu/ULsF7toa0lktijBxCPn38HEnyLEyA3e8 +a93P0myWoxFn/fUpegT3TVSv33anqwIDAQABAoICAQDihR2kp4Rfw4luT2nNUm5C +JFAxJH/vLT5uX1Gm8XWPI9oC21dnu6Asd5RskrGfSouWszZXyUmg+TmpXRSa796t +hjHS0KW59HBxvYDx18mEXJXHWbcK/L5D5iFmpMYHH6xiFT6i8BrR9ofCSeCU52SF +CkEzGZJ0pfR/w4dIvjGWNNcsoI2mp2hl9/84fco8ol7x6UPL5vwwJPsLS0hqwmAz +v+994IKCT1EQllEGhv0pY7fPscXF9pOXDbnmYjwqpEhzJekpsF0j03A32R/4dOx2 +x8eOpngLv2Hczg5RSpbzRF4X0yJVANg/AlJJZmkYGOZ5qXnSQqqZF+dcSCvVVwhO +GS7uci6Mcy7Ov0Gj9HWX8As0SofPtUMuO7k/nJYOzcgY+4agyIDrylIeG86gdCDQ +hGVz+T5reJZIBMp66GPT6M8r36q50cx2x9nJjxLlIjvly1EruVjQoSMUfjewHG91 +xJI0iFhlbBrCpyLx3X0smMEr0vJzM5J0GtdxQdcSocDy5244+4zuslAXgsEYwHYx +WYFMsotRif8aB2b3OSt0yH+Heh06dZehvwWa4F4/3qlP48e0/CWIL7Y/tBgZv8Gh +n3F7HsHvMx6qQqMY5AxudbkpKdM9W84gXriHPIsO2oZEU6N65J/Bpq5Ve4JBlih1 +Ji0CtvHlAR2dhGkj6Q36MQKCAQEA9z/HTd8hd4FNEfn595OVWr9CeZc1zAlNa94I +lvTLhLEFcMkGsZd9KyV87MOV3p9m+XI7UJmqllIHOkwrECF2wzFssguPk+RAJ5hW +LZJgsF0fPnhX0qJFXzSNzzqAICES6+s9jvHMO9PhtF59uv4zsRFEBmKAr0AN8Zsk +rEk+2Tl2RgC+sxzRS767De9CrbSjxm+qAHuFFh8QX/N/mPoLUa+V5Oh2srA5bTHn +t0vyfQQ9+gqTBJDy51VGYlYw5OQBAiOPTgzbSmm2gqdWYgGn2Sp5IBQLF5nGGGsV +70DvnsoxViqpsv+yObAF9PqXnu6UGoB023Jr8x683bU9/jQFLQKCAQEA8735Vbbc +kncVJIpIlG7SDPmlLCFnxokvWWmyJS2J4SrIJJykn30qknGGZFFn67NB5PulAEaw +mdG58FIxxkm8bEKwebEhdnB9sP8k3TvddPKlBXYb1WuGxzyF/xlHniEJ7jN0YAAz +D1BLxTP1OM47iX5ocyVpOPbAdq/yZK0bffvIUy/QKLeJNx0d59PKpJRb4y6T/LvS +tp3UHrBqCNYYoKsZluS9Kg6WJF4g269yn2LSdtzQlAW1IT3DgO7h+2UBYI4FwMao +BZVew44CjljGCTA2KL4jdsqnTyt0qzzAiJZ0CGkJY9gal16ODHcBUKfNGYvjU8pf +2qDEiCn0HayXNwKCAQEAlOscLuHy9Viyw94NWbnRXlwOPM//fgooUIzmHY4Jur0o +arsZxgNZR5CHws82yGS4EAxmf3Bel7WGVu2jjk6pin2NW1utOcVjgrW1SjN8+xzL 
+gcPYGazVHbe4phU1MKTbEa+ZXyxx96LxscKr9eG/3qlokHPp0CRDgb8RApgHO6zp +eNZgBd+YjAewAH+YaKmBbza4bRv4l89T/Ibb1pbcFHIuVTZSr+OGYyeIyhT7U6Mn +dR/DVx+6vezVvMrvHh3aIaCrYrZJqnMrk1wYomUe5KU5WUHZQHjFINX22ykAamKb +/qsplP9/KFHF9Lyub/KAz8mJGNe8/y0HUn4kfaR1bQKCAQEAhZHTsx8UXMcZNP76 +qyzXuviLhVWBExFWez8quqjr6BKTv0yAAk6LJ9lCdnMN6eI/+AXW9AHJAWIm7QV9 +9VWvBfy9zNI+rjMTDg2j3ADUaSQXPpjsw9W69C+8loD5+DPOx1Q3L+ysDnZIL3c7 +qLeLdNtqzb7wnKDL876TrIwYhr+VldCb19RMQ4GXQ9WSNQKAIE0EF/mtjRmMhozS +bqk0scdRrJkI+KUpriBPDVRmEeYLw8taGePO0LqSCnPeLu+5A3qQuIWkyfqDBdMq +n2sSizJ6W3Vm5dBEQ2Ri+Pu/3pnkWD+HP8nLOKw+V6JXfCWYhaldGCvMv3heeufS +uPg9nQKCAQEAp/boT63JB+ahU3VQGtqwlDXkRS/Ge8a7FRp4kjdK7d1mtUDqOJ9U +l2RHgOkqhNuAPy64/07caDK3R7vKeOFmSXCV/WHIcgt46SRwFQECZeyA1R+EkTes +tseTngdFrQ10Xf+DmLNqCyX5KpgQf+ccluyyH6uK6FRI/VfU4sLrUGyOblqHq/c4 +bRR4nMwiw5yga45YhQH8uJF54MI7XaD2/hPCAIJBkx88taRzMUlWl1u1VQosIvtZ +5hCRepq9A44P61c+HI/5fzXAn2xvwR2EiV0hAYLn+rmYgBId/RfcstWUR78A9wpT +/OsV3MTX1gCaTE9Q2GlZVybDh20ZvdBC/g== +-----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/web3signer/key.p12 b/testing/web3signer_tests/tls/web3signer/key.p12 new file mode 100644 index 0000000000..459f4fb62e Binary files /dev/null and b/testing/web3signer_tests/tls/web3signer/key.p12 differ diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt new file mode 100644 index 0000000000..de80bb7ceb --- /dev/null +++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt @@ -0,0 +1 @@ +lighthouse 1B:43:E1:58:26:7D:3F:70:BD:DA:32:E9:29:A5:A9:50:EA:B2:A8:C3:0C:82:BF:90:13:ED:5B:E0:7D:5B:0A:C0 diff --git a/testing/web3signer_tests/tls/password.txt b/testing/web3signer_tests/tls/web3signer/password.txt similarity index 100% rename from testing/web3signer_tests/tls/password.txt rename to testing/web3signer_tests/tls/web3signer/password.txt diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index a1604064ad..9833c046f5 100644 --- 
a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -30,7 +30,7 @@ dirs = "3.0.1" directory = { path = "../common/directory" } lockfile = { path = "../common/lockfile" } environment = { path = "../lighthouse/environment" } -parking_lot = "0.11.0" +parking_lot = "0.12.0" exit-future = "0.2.0" filesystem = { path = "../common/filesystem" } hex = "0.4.2" @@ -47,9 +47,9 @@ warp_utils = { path = "../common/warp_utils" } warp = "0.3.2" hyper = "0.14.4" eth2_serde_utils = "0.1.1" -libsecp256k1 = "0.6.0" +libsecp256k1 = "0.7.0" ring = "0.16.19" -rand = { version = "0.7.3", features = ["small_rng"] } +rand = { version = "0.8.5", features = ["small_rng"] } lighthouse_metrics = { path = "../common/lighthouse_metrics" } lazy_static = "1.4.0" itertools = "0.10.0" diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 0cba70481f..2ba81eac7a 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,3 +1,4 @@ +use crate::beacon_node_fallback::{AllErrored, Error as FallbackError}; use crate::{ beacon_node_fallback::{BeaconNodeFallback, RequireSynced}, graffiti_file::GraffitiFile, @@ -10,7 +11,30 @@ use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; use tokio::sync::mpsc; -use types::{EthSpec, PublicKeyBytes, Slot}; +use types::{ + BlindedPayload, BlockType, Epoch, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot, +}; + +#[derive(Debug)] +pub enum BlockError { + Recoverable(String), + Irrecoverable(String), +} + +impl From> for BlockError { + fn from(e: AllErrored) -> Self { + if e.0.iter().any(|(_, error)| { + matches!( + error, + FallbackError::RequestFailed(BlockError::Irrecoverable(_)) + ) + }) { + BlockError::Irrecoverable(e.to_string()) + } else { + BlockError::Recoverable(e.to_string()) + } + } +} /// Builds a `BlockService`. 
pub struct BlockServiceBuilder { @@ -20,6 +44,7 @@ pub struct BlockServiceBuilder { context: Option>, graffiti: Option, graffiti_file: Option, + private_tx_proposals: bool, } impl BlockServiceBuilder { @@ -31,6 +56,7 @@ impl BlockServiceBuilder { context: None, graffiti: None, graffiti_file: None, + private_tx_proposals: false, } } @@ -64,6 +90,11 @@ impl BlockServiceBuilder { self } + pub fn private_tx_proposals(mut self, private_tx_proposals: bool) -> Self { + self.private_tx_proposals = private_tx_proposals; + self + } + pub fn build(self) -> Result, String> { Ok(BlockService { inner: Arc::new(Inner { @@ -81,6 +112,7 @@ impl BlockServiceBuilder { .ok_or("Cannot build BlockService without runtime_context")?, graffiti: self.graffiti, graffiti_file: self.graffiti_file, + private_tx_proposals: self.private_tx_proposals, }), }) } @@ -94,6 +126,7 @@ pub struct Inner { context: RuntimeContext, graffiti: Option, graffiti_file: Option, + private_tx_proposals: bool, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. 
@@ -202,16 +235,46 @@ impl BlockService { ) } + let private_tx_proposals = self.private_tx_proposals; + let merge_slot = self + .context + .eth2_config + .spec + .bellatrix_fork_epoch + .unwrap_or_else(Epoch::max_value) + .start_slot(E::slots_per_epoch()); for validator_pubkey in proposers { let service = self.clone(); let log = log.clone(); self.inner.context.executor.spawn( async move { - if let Err(e) = service.publish_block(slot, validator_pubkey).await { + let publish_result = if private_tx_proposals && slot >= merge_slot { + let mut result = service.clone() + .publish_block::>(slot, validator_pubkey) + .await; + match result.as_ref() { + Err(BlockError::Recoverable(e)) => { + error!(log, "Error whilst producing a blinded block, attempting to publish full block"; "error" => ?e); + result = service + .publish_block::>(slot, validator_pubkey) + .await; + }, + Err(BlockError::Irrecoverable(e)) => { + error!(log, "Error whilst producing a blinded block, cannot fallback because block was signed"; "error" => ?e); + }, + _ => {}, + }; + result + } else { + service + .publish_block::>(slot, validator_pubkey) + .await + }; + if let Err(e) = publish_result { crit!( log, "Error whilst producing block"; - "message" => e + "message" => ?e ); } }, @@ -223,25 +286,29 @@ impl BlockService { } /// Produce a block at the given slot for validator_pubkey - async fn publish_block( + async fn publish_block>( self, slot: Slot, validator_pubkey: PublicKeyBytes, - ) -> Result<(), String> { + ) -> Result<(), BlockError> { let log = self.context.log(); let _timer = metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK]); - let current_slot = self - .slot_clock - .now() - .ok_or("Unable to determine current slot from clock")?; + let current_slot = self.slot_clock.now().ok_or_else(|| { + BlockError::Recoverable("Unable to determine current slot from clock".to_string()) + })?; let randao_reveal = self .validator_store .randao_reveal(validator_pubkey, 
slot.epoch(E::slots_per_epoch())) .await - .map_err(|e| format!("Unable to produce randao reveal signature: {:?}", e))? + .map_err(|e| { + BlockError::Recoverable(format!( + "Unable to produce randao reveal signature: {:?}", + e + )) + })? .into(); let graffiti = self @@ -261,58 +328,107 @@ impl BlockService { let self_ref = &self; let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; - let signed_block = self + // Request block from first responsive beacon node. + let block = self .beacon_nodes .first_success(RequireSynced::No, |beacon_node| async move { let get_timer = metrics::start_timer_vec( &metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK_HTTP_GET], ); - let block = beacon_node - .get_validator_blocks(slot, randao_reveal_ref, graffiti.as_ref()) - .await - .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))? - .data; + let block = match Payload::block_type() { + BlockType::Full => { + beacon_node + .get_validator_blocks::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + BlockType::Blinded => { + beacon_node + .get_validator_blinded_blocks::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + }; drop(get_timer); if proposer_index != Some(block.proposer_index()) { - return Err( + return Err(BlockError::Recoverable( "Proposer index does not match block proposer. 
Beacon chain re-orged" .to_string(), - ); + )); } - let signed_block = self_ref - .validator_store - .sign_block(*validator_pubkey_ref, block, current_slot) - .await - .map_err(|e| format!("Unable to sign block: {:?}", e))?; + Ok::<_, BlockError>(block) + }) + .await?; + let signed_block = self_ref + .validator_store + .sign_block::(*validator_pubkey_ref, block, current_slot) + .await + .map_err(|e| BlockError::Recoverable(format!("Unable to sign block: {:?}", e)))?; + + // Publish block with first available beacon node. + self.beacon_nodes + .first_success(RequireSynced::No, |beacon_node| async { let _post_timer = metrics::start_timer_vec( &metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK_HTTP_POST], ); - beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - format!("Error from beacon node when publishing block: {:?}", e) - })?; - Ok::<_, String>(signed_block) + match Payload::block_type() { + BlockType::Full => beacon_node + .post_beacon_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })?, + BlockType::Blinded => beacon_node + .post_beacon_blinded_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })?, + } + + info!( + log, + "Successfully published block"; + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); + Ok::<_, BlockError>(()) }) - .await - .map_err(|e| e.to_string())?; - - info!( - log, - "Successfully published block"; - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => 
signed_block.slot().as_u64(), - ); - + .await?; Ok(()) } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 49a8f58167..d02e26ace0 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -258,4 +258,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { immediately.") .takes_value(false), ) + .arg( + Arg::with_name("private-tx-proposals") + .long("private-tx-proposals") + .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ + headers during proposals and will sign over headers. Useful for outsourcing \ + execution payload construction during proposals.") + .takes_value(false), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 232526bac5..45e10e39e8 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -55,6 +55,7 @@ pub struct Config { /// If true, enable functionality that monitors the network for attestations or proposals from /// any of the validators managed by this client before starting up. pub enable_doppelganger_protection: bool, + pub private_tx_proposals: bool, /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. 
pub beacon_nodes_tls_certs: Option>, @@ -91,6 +92,7 @@ impl Default for Config { monitoring_api: None, enable_doppelganger_protection: false, beacon_nodes_tls_certs: None, + private_tx_proposals: false, } } } @@ -306,6 +308,10 @@ impl Config { config.enable_doppelganger_protection = true; } + if cli_args.is_present("private-tx-proposals") { + config.private_tx_proposals = true; + } + Ok(config) } } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 6428034d8b..f8ca5a3d44 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -646,17 +646,18 @@ async fn poll_beacon_attesters_for_epoch( response .data .into_iter() - .filter(|duty| local_pubkeys.contains(&duty.pubkey)) .filter(|duty| { - // Only update the duties if either is true: - // - // - There were no known duties for this epoch. - // - The dependent root has changed, signalling a re-org. - attesters.get(&duty.pubkey).map_or(true, |duties| { - duties - .get(&epoch) - .map_or(true, |(prior, _)| *prior != dependent_root) - }) + local_pubkeys.contains(&duty.pubkey) && { + // Only update the duties if either is true: + // + // - There were no known duties for this epoch. + // - The dependent root has changed, signalling a re-org. 
+ attesters.get(&duty.pubkey).map_or(true, |duties| { + duties + .get(&epoch) + .map_or(true, |(prior, _)| *prior != dependent_root) + }) + } }) .collect::>() }; diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index a8e4fd2629..db59c25f75 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -1,5 +1,5 @@ use crate::ValidatorStore; -use account_utils::validator_definitions::{SigningDefinition, ValidatorDefinition}; +use account_utils::validator_definitions::ValidatorDefinition; use account_utils::{ eth2_wallet::{bip39::Mnemonic, WalletBuilder}, random_mnemonic, random_password, ZeroizeString, @@ -164,24 +164,12 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, } pub async fn create_validators_web3signer( - validator_requests: &[api_types::Web3SignerValidatorRequest], + validators: Vec, validator_store: &ValidatorStore, ) -> Result<(), warp::Rejection> { - for request in validator_requests { - let validator_definition = ValidatorDefinition { - enabled: request.enable, - voting_public_key: request.voting_public_key.clone(), - graffiti: request.graffiti.clone(), - suggested_fee_recipient: request.suggested_fee_recipient, - description: request.description.clone(), - signing_definition: SigningDefinition::Web3Signer { - url: request.url.clone(), - root_certificate_path: request.root_certificate_path.clone(), - request_timeout_ms: request.request_timeout_ms, - }, - }; + for validator in validators { validator_store - .add_validator(validator_definition) + .add_validator(validator) .await .map_err(|e| { warp_utils::reject::custom_server_error(format!( diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index ce6089c5b6..f88aacfca8 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -1,5 +1,8 @@ //! 
Implementation of the standard keystore management API. -use crate::{signing_method::SigningMethod, InitializedValidators, ValidatorStore}; +use crate::{ + initialized_validators::Error, signing_method::SigningMethod, InitializedValidators, + ValidatorStore, +}; use account_utils::ZeroizeString; use eth2::lighthouse_vc::std_types::{ DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, ImportKeystoreStatus, @@ -11,8 +14,8 @@ use slog::{info, warn, Logger}; use slot_clock::SlotClock; use std::path::PathBuf; use std::sync::Arc; -use std::sync::Weak; -use tokio::runtime::Runtime; +use task_executor::TaskExecutor; +use tokio::runtime::Handle; use types::{EthSpec, PublicKeyBytes}; use validator_dir::Builder as ValidatorDirBuilder; use warp::Rejection; @@ -56,7 +59,7 @@ pub fn import( request: ImportKeystoresRequest, validator_dir: PathBuf, validator_store: Arc>, - runtime: Weak, + task_executor: TaskExecutor, log: Logger, ) -> Result { // Check request validity. This is the only cases in which we should return a 4xx code. @@ -119,14 +122,14 @@ pub fn import( ImportKeystoreStatus::Error, format!("slashing protection import failed: {:?}", e), ) - } else if let Some(runtime) = runtime.upgrade() { + } else if let Some(handle) = task_executor.handle() { // Import the keystore. match import_single_keystore( keystore, password, validator_dir.clone(), &validator_store, - runtime, + handle, ) { Ok(status) => Status::ok(status), Err(e) => { @@ -156,7 +159,7 @@ fn import_single_keystore( password: ZeroizeString, validator_dir_path: PathBuf, validator_store: &ValidatorStore, - runtime: Arc, + handle: Handle, ) -> Result { // Check if the validator key already exists, erroring if it is a remote signer validator. 
let pubkey = keystore @@ -195,7 +198,7 @@ fn import_single_keystore( let voting_keystore_path = validator_dir.voting_keystore_path(); drop(validator_dir); - runtime + handle .block_on(validator_store.add_validator_keystore( voting_keystore_path, password, @@ -211,7 +214,7 @@ fn import_single_keystore( pub fn delete( request: DeleteKeystoresRequest, validator_store: Arc>, - runtime: Weak, + task_executor: TaskExecutor, log: Logger, ) -> Result { // Remove from initialized validators. @@ -222,8 +225,11 @@ pub fn delete( .pubkeys .iter() .map(|pubkey_bytes| { - match delete_single_keystore(pubkey_bytes, &mut initialized_validators, runtime.clone()) - { + match delete_single_keystore( + pubkey_bytes, + &mut initialized_validators, + task_executor.clone(), + ) { Ok(status) => Status::ok(status), Err(error) => { warn!( @@ -241,8 +247,8 @@ pub fn delete( // Use `update_validators` to update the key cache. It is safe to let the key cache get a bit out // of date as it resets when it can't be decrypted. We update it just a single time to avoid // continually resetting it after each key deletion. 
- if let Some(runtime) = runtime.upgrade() { - runtime + if let Some(handle) = task_executor.handle() { + handle .block_on(initialized_validators.update_validators()) .map_err(|e| custom_server_error(format!("unable to update key cache: {:?}", e)))?; } @@ -275,16 +281,21 @@ pub fn delete( fn delete_single_keystore( pubkey_bytes: &PublicKeyBytes, initialized_validators: &mut InitializedValidators, - runtime: Weak, + task_executor: TaskExecutor, ) -> Result { - if let Some(runtime) = runtime.upgrade() { + if let Some(handle) = task_executor.handle() { let pubkey = pubkey_bytes .decompress() .map_err(|e| format!("invalid pubkey, {:?}: {:?}", pubkey_bytes, e))?; - runtime - .block_on(initialized_validators.delete_definition_and_keystore(&pubkey)) - .map_err(|e| format!("unable to disable and delete: {:?}", e)) + match handle.block_on(initialized_validators.delete_definition_and_keystore(&pubkey, true)) + { + Ok(_) => Ok(DeleteKeystoreStatus::Deleted), + Err(e) => match e { + Error::ValidatorNotInitialized(_) => Ok(DeleteKeystoreStatus::NotFound), + _ => Err(format!("unable to disable and delete: {:?}", e)), + }, + } } else { Err("validator client shutdown".into()) } diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 8e1f5a7390..9ee983a35a 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1,10 +1,14 @@ mod api_secret; mod create_validator; mod keystores; +mod remotekeys; mod tests; use crate::ValidatorStore; -use account_utils::mnemonic_from_phrase; +use account_utils::{ + mnemonic_from_phrase, + validator_definitions::{SigningDefinition, ValidatorDefinition}, +}; use create_validator::{create_validators_mnemonic, create_validators_web3signer}; use eth2::lighthouse_vc::{ std_types::AuthResponse, @@ -18,8 +22,8 @@ use std::future::Future; use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; -use std::sync::{Arc, Weak}; -use 
tokio::runtime::Runtime; +use std::sync::Arc; +use task_executor::TaskExecutor; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; use warp::{ @@ -55,7 +59,7 @@ impl From for Error { /// /// The server will gracefully handle the case where any fields are `None`. pub struct Context { - pub runtime: Weak, + pub task_executor: TaskExecutor, pub api_secret: ApiSecret, pub validator_store: Option>>, pub validator_dir: Option, @@ -157,8 +161,8 @@ pub fn serve( }) }); - let inner_runtime = ctx.runtime.clone(); - let runtime_filter = warp::any().map(move || inner_runtime.clone()); + let inner_task_executor = ctx.task_executor.clone(); + let task_executor_filter = warp::any().map(move || inner_task_executor.clone()); let inner_validator_dir = ctx.validator_dir.clone(); let validator_dir_filter = warp::any() @@ -286,18 +290,18 @@ pub fn serve( .and(validator_store_filter.clone()) .and(spec_filter.clone()) .and(signer.clone()) - .and(runtime_filter.clone()) + .and(task_executor_filter.clone()) .and_then( |body: Vec, validator_dir: PathBuf, validator_store: Arc>, spec: Arc, signer, - runtime: Weak| { + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { - if let Some(runtime) = runtime.upgrade() { + if let Some(handle) = task_executor.handle() { let (validators, mnemonic) = - runtime.block_on(create_validators_mnemonic( + handle.block_on(create_validators_mnemonic( None, None, &body, @@ -312,7 +316,7 @@ pub fn serve( Ok(api_types::GenericResponse::from(response)) } else { Err(warp_utils::reject::custom_server_error( - "Runtime shutdown".into(), + "Lighthouse shutting down".into(), )) } }) @@ -329,16 +333,16 @@ pub fn serve( .and(validator_store_filter.clone()) .and(spec_filter) .and(signer.clone()) - .and(runtime_filter.clone()) + .and(task_executor_filter.clone()) .and_then( |body: api_types::CreateValidatorsMnemonicRequest, validator_dir: PathBuf, validator_store: Arc>, spec: Arc, signer, - runtime: 
Weak| { + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { - if let Some(runtime) = runtime.upgrade() { + if let Some(handle) = task_executor.handle() { let mnemonic = mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| { warp_utils::reject::custom_bad_request(format!( @@ -347,7 +351,7 @@ pub fn serve( )) })?; let (validators, _mnemonic) = - runtime.block_on(create_validators_mnemonic( + handle.block_on(create_validators_mnemonic( Some(mnemonic), Some(body.key_derivation_path_offset), &body.validators, @@ -358,7 +362,7 @@ pub fn serve( Ok(api_types::GenericResponse::from(validators)) } else { Err(warp_utils::reject::custom_server_error( - "Runtime shutdown".into(), + "Lighthouse shutting down".into(), )) } }) @@ -374,13 +378,13 @@ pub fn serve( .and(validator_dir_filter.clone()) .and(validator_store_filter.clone()) .and(signer.clone()) - .and(runtime_filter.clone()) + .and(task_executor_filter.clone()) .and_then( |body: api_types::KeystoreValidatorsPostRequest, validator_dir: PathBuf, validator_store: Arc>, signer, - runtime: Weak| { + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { // Check to ensure the password is correct. let keypair = body @@ -412,8 +416,8 @@ pub fn serve( let suggested_fee_recipient = body.suggested_fee_recipient; let validator_def = { - if let Some(runtime) = runtime.upgrade() { - runtime + if let Some(handle) = task_executor.handle() { + handle .block_on(validator_store.add_validator_keystore( voting_keystore_path, voting_password, @@ -429,7 +433,7 @@ pub fn serve( })? 
} else { return Err(warp_utils::reject::custom_server_error( - "Runtime shutdown".into(), + "Lighthouse shutting down".into(), )); } }; @@ -451,19 +455,39 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter.clone()) .and(signer.clone()) - .and(runtime_filter.clone()) + .and(task_executor_filter.clone()) .and_then( |body: Vec, validator_store: Arc>, signer, - runtime: Weak| { + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { - if let Some(runtime) = runtime.upgrade() { - runtime.block_on(create_validators_web3signer(&body, &validator_store))?; + if let Some(handle) = task_executor.handle() { + let web3signers: Vec = body + .into_iter() + .map(|web3signer| ValidatorDefinition { + enabled: web3signer.enable, + voting_public_key: web3signer.voting_public_key, + graffiti: web3signer.graffiti, + suggested_fee_recipient: web3signer.suggested_fee_recipient, + description: web3signer.description, + signing_definition: SigningDefinition::Web3Signer { + url: web3signer.url, + root_certificate_path: web3signer.root_certificate_path, + request_timeout_ms: web3signer.request_timeout_ms, + client_identity_path: web3signer.client_identity_path, + client_identity_password: web3signer.client_identity_password, + }, + }) + .collect(); + handle.block_on(create_validators_web3signer( + web3signers, + &validator_store, + ))?; Ok(()) } else { Err(warp_utils::reject::custom_server_error( - "Runtime shutdown".into(), + "Lighthouse shutting down".into(), )) } }) @@ -478,13 +502,13 @@ pub fn serve( .and(warp::body::json()) .and(validator_store_filter.clone()) .and(signer.clone()) - .and(runtime_filter.clone()) + .and(task_executor_filter.clone()) .and_then( |validator_pubkey: PublicKey, body: api_types::ValidatorPatchRequest, validator_store: Arc>, signer, - runtime: Weak| { + task_executor: TaskExecutor| { blocking_signed_json_task(signer, move || { let initialized_validators_rw_lock = validator_store.initialized_validators(); let mut 
initialized_validators = initialized_validators_rw_lock.write(); @@ -496,8 +520,8 @@ pub fn serve( ))), Some(enabled) if enabled == body.enabled => Ok(()), Some(_) => { - if let Some(runtime) = runtime.upgrade() { - runtime + if let Some(handle) = task_executor.handle() { + handle .block_on( initialized_validators .set_validator_status(&validator_pubkey, body.enabled), @@ -511,7 +535,7 @@ pub fn serve( Ok(()) } else { Err(warp_utils::reject::custom_server_error( - "Runtime shutdown".into(), + "Lighthouse shutting down".into(), )) } } @@ -536,6 +560,7 @@ pub fn serve( // Standard key-manager endpoints. let eth_v1 = warp::path("eth").and(warp::path("v1")); let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end()); + let std_remotekeys = eth_v1.and(warp::path("remotekeys")).and(warp::path::end()); // GET /eth/v1/keystores let get_std_keystores = std_keystores @@ -551,26 +576,60 @@ pub fn serve( .and(signer.clone()) .and(validator_dir_filter) .and(validator_store_filter.clone()) - .and(runtime_filter.clone()) + .and(task_executor_filter.clone()) .and(log_filter.clone()) .and_then( - |request, signer, validator_dir, validator_store, runtime, log| { + |request, signer, validator_dir, validator_store, task_executor, log| { blocking_signed_json_task(signer, move || { - keystores::import(request, validator_dir, validator_store, runtime, log) + keystores::import(request, validator_dir, validator_store, task_executor, log) }) }, ); // DELETE /eth/v1/keystores let delete_std_keystores = std_keystores + .and(warp::body::json()) + .and(signer.clone()) + .and(validator_store_filter.clone()) + .and(task_executor_filter.clone()) + .and(log_filter.clone()) + .and_then(|request, signer, validator_store, task_executor, log| { + blocking_signed_json_task(signer, move || { + keystores::delete(request, validator_store, task_executor, log) + }) + }); + + // GET /eth/v1/remotekeys + let get_std_remotekeys = std_remotekeys + .and(signer.clone()) + 
.and(validator_store_filter.clone()) + .and_then(|signer, validator_store: Arc>| { + blocking_signed_json_task(signer, move || Ok(remotekeys::list(validator_store))) + }); + + // POST /eth/v1/remotekeys + let post_std_remotekeys = std_remotekeys + .and(warp::body::json()) + .and(signer.clone()) + .and(validator_store_filter.clone()) + .and(task_executor_filter.clone()) + .and(log_filter.clone()) + .and_then(|request, signer, validator_store, task_executor, log| { + blocking_signed_json_task(signer, move || { + remotekeys::import(request, validator_store, task_executor, log) + }) + }); + + // DELETE /eth/v1/remotekeys + let delete_std_remotekeys = std_remotekeys .and(warp::body::json()) .and(signer) .and(validator_store_filter) - .and(runtime_filter) - .and(log_filter) - .and_then(|request, signer, validator_store, runtime, log| { + .and(task_executor_filter) + .and(log_filter.clone()) + .and_then(|request, signer, validator_store, task_executor, log| { blocking_signed_json_task(signer, move || { - keystores::delete(request, validator_store, runtime, log) + remotekeys::delete(request, validator_store, task_executor, log) }) }); @@ -588,17 +647,19 @@ pub fn serve( .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) - .or(get_std_keystores), + .or(get_std_keystores) + .or(get_std_remotekeys), ) .or(warp::post().and( post_validators .or(post_validators_keystore) .or(post_validators_mnemonic) .or(post_validators_web3signer) - .or(post_std_keystores), + .or(post_std_keystores) + .or(post_std_remotekeys), )) .or(warp::patch().and(patch_validators)) - .or(warp::delete().and(delete_std_keystores)), + .or(warp::delete().and(delete_std_keystores.or(delete_std_remotekeys))), ) // The auth route is the only route that is allowed to be accessed without the API token. 
.or(warp::get().and(get_auth)) diff --git a/validator_client/src/http_api/remotekeys.rs b/validator_client/src/http_api/remotekeys.rs new file mode 100644 index 0000000000..402396d4b4 --- /dev/null +++ b/validator_client/src/http_api/remotekeys.rs @@ -0,0 +1,209 @@ +//! Implementation of the standard remotekey management API. +use crate::{initialized_validators::Error, InitializedValidators, ValidatorStore}; +use account_utils::validator_definitions::{SigningDefinition, ValidatorDefinition}; +use eth2::lighthouse_vc::std_types::{ + DeleteRemotekeyStatus, DeleteRemotekeysRequest, DeleteRemotekeysResponse, + ImportRemotekeyStatus, ImportRemotekeysRequest, ImportRemotekeysResponse, + ListRemotekeysResponse, SingleListRemotekeysResponse, Status, +}; +use slog::{info, warn, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use task_executor::TaskExecutor; +use tokio::runtime::Handle; +use types::{EthSpec, PublicKeyBytes}; +use url::Url; +use warp::Rejection; +use warp_utils::reject::custom_server_error; + +pub fn list( + validator_store: Arc>, +) -> ListRemotekeysResponse { + let initialized_validators_rwlock = validator_store.initialized_validators(); + let initialized_validators = initialized_validators_rwlock.read(); + + let keystores = initialized_validators + .validator_definitions() + .iter() + .filter(|def| def.enabled) + .filter_map(|def| { + let validating_pubkey = def.voting_public_key.compress(); + + match &def.signing_definition { + SigningDefinition::LocalKeystore { .. } => None, + SigningDefinition::Web3Signer { url, .. 
} => Some(SingleListRemotekeysResponse { + pubkey: validating_pubkey, + url: url.clone(), + readonly: false, + }), + } + }) + .collect::>(); + + ListRemotekeysResponse { data: keystores } +} + +pub fn import( + request: ImportRemotekeysRequest, + validator_store: Arc>, + task_executor: TaskExecutor, + log: Logger, +) -> Result { + info!( + log, + "Importing remotekeys via standard HTTP API"; + "count" => request.remote_keys.len(), + ); + // Import each remotekey. Some remotekeys may fail to be imported, so we record a status for each. + let mut statuses = Vec::with_capacity(request.remote_keys.len()); + + for remotekey in request.remote_keys { + let status = if let Some(handle) = task_executor.handle() { + // Import the keystore. + match import_single_remotekey(remotekey.pubkey, remotekey.url, &validator_store, handle) + { + Ok(status) => Status::ok(status), + Err(e) => { + warn!( + log, + "Error importing keystore, skipped"; + "pubkey" => remotekey.pubkey.to_string(), + "error" => ?e, + ); + Status::error(ImportRemotekeyStatus::Error, e) + } + } + } else { + Status::error( + ImportRemotekeyStatus::Error, + "validator client shutdown".into(), + ) + }; + statuses.push(status); + } + Ok(ImportRemotekeysResponse { data: statuses }) +} + +fn import_single_remotekey( + pubkey: PublicKeyBytes, + url: String, + validator_store: &ValidatorStore, + handle: Handle, +) -> Result { + if let Err(url_err) = Url::parse(&url) { + return Err(format!("failed to parse remotekey URL: {}", url_err)); + } + + let pubkey = pubkey + .decompress() + .map_err(|_| format!("invalid pubkey: {}", pubkey))?; + + if let Some(def) = validator_store + .initialized_validators() + .read() + .validator_definitions() + .iter() + .find(|def| def.voting_public_key == pubkey) + { + if def.signing_definition.is_local_keystore() { + return Err("Pubkey already present in local keystore.".into()); + } else if def.enabled { + return Ok(ImportRemotekeyStatus::Duplicate); + } + } + + // Remotekeys are stored as 
 web3signers. + // The remotekey API provides fewer configuration options than the web3signer API. + let web3signer_validator = ValidatorDefinition { + enabled: true, + voting_public_key: pubkey, + graffiti: None, + suggested_fee_recipient: None, + description: String::from("Added by remotekey API"), + signing_definition: SigningDefinition::Web3Signer { + url, + root_certificate_path: None, + request_timeout_ms: None, + client_identity_path: None, + client_identity_password: None, + }, + }; + handle + .block_on(validator_store.add_validator(web3signer_validator)) + .map_err(|e| format!("failed to initialize validator: {:?}", e))?; + + Ok(ImportRemotekeyStatus::Imported) +} + +pub fn delete( + request: DeleteRemotekeysRequest, + validator_store: Arc>, + task_executor: TaskExecutor, + log: Logger, +) -> Result { + info!( + log, + "Deleting remotekeys via standard HTTP API"; + "count" => request.pubkeys.len(), + ); + // Remove from initialized validators. + let initialized_validators_rwlock = validator_store.initialized_validators(); + let mut initialized_validators = initialized_validators_rwlock.write(); + + let statuses = request + .pubkeys + .iter() + .map(|pubkey_bytes| { + match delete_single_remotekey( + pubkey_bytes, + &mut initialized_validators, + task_executor.clone(), + ) { + Ok(status) => Status::ok(status), + Err(error) => { + warn!( + log, + "Error deleting keystore"; + "pubkey" => ?pubkey_bytes, + "error" => ?error, + ); + Status::error(DeleteRemotekeyStatus::Error, error) + } + } + }) + .collect::>(); + + // Use `update_validators` to update the key cache. It is safe to let the key cache get a bit out + // of date as it resets when it can't be decrypted. We update it just a single time to avoid + // continually resetting it after each key deletion. 
+ if let Some(handle) = task_executor.handle() { + handle + .block_on(initialized_validators.update_validators()) + .map_err(|e| custom_server_error(format!("unable to update key cache: {:?}", e)))?; + } + + Ok(DeleteRemotekeysResponse { data: statuses }) +} + +fn delete_single_remotekey( + pubkey_bytes: &PublicKeyBytes, + initialized_validators: &mut InitializedValidators, + task_executor: TaskExecutor, +) -> Result { + if let Some(handle) = task_executor.handle() { + let pubkey = pubkey_bytes + .decompress() + .map_err(|e| format!("invalid pubkey, {:?}: {:?}", pubkey_bytes, e))?; + + match handle.block_on(initialized_validators.delete_definition_and_keystore(&pubkey, false)) + { + Ok(_) => Ok(DeleteRemotekeyStatus::Deleted), + Err(e) => match e { + Error::ValidatorNotInitialized(_) => Ok(DeleteRemotekeyStatus::NotFound), + _ => Err(format!("unable to disable and delete: {:?}", e)), + }, + } + } else { + Err("validator client shutdown".into()) + } +} diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index eef76eb363..210555d9c0 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -102,7 +102,7 @@ impl ApiTester { spec, Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock, - executor, + executor.clone(), log.clone(), )); @@ -113,7 +113,7 @@ impl ApiTester { let initialized_validators = validator_store.initialized_validators(); let context = Arc::new(Context { - runtime, + task_executor: executor, api_secret, validator_dir: Some(validator_dir.path().into()), validator_store: Some(validator_store.clone()), @@ -457,6 +457,8 @@ impl ApiTester { url: format!("http://signer_{}.com/", i), root_certificate_path: None, request_timeout_ms: None, + client_identity_path: None, + client_identity_password: None, } }) .collect(); diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index 427f22adc3..a381378ffe 100644 
--- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -8,8 +8,7 @@ use eth2::lighthouse_vc::{ use itertools::Itertools; use rand::{rngs::SmallRng, Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; -use std::collections::HashMap; -use std::path::Path; +use std::{collections::HashMap, path::Path}; fn new_keystore(password: ZeroizeString) -> Keystore { let keypair = Keypair::random(); @@ -41,6 +40,21 @@ fn web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorReq url: web3_signer_url(), root_certificate_path: None, request_timeout_ms: None, + client_identity_path: None, + client_identity_password: None, + } +} + +fn new_remotekey_validator() -> (Keypair, SingleImportRemotekeysRequest) { + let keypair = Keypair::random(); + let pk = keypair.pk.clone(); + (keypair, remotekey_validator_with_pubkey(pk)) +} + +fn remotekey_validator_with_pubkey(pubkey: PublicKey) -> SingleImportRemotekeysRequest { + SingleImportRemotekeysRequest { + pubkey: pubkey.compress(), + url: web3_signer_url(), } } @@ -107,7 +121,7 @@ fn all_delete_error(count: usize) -> impl Iterator all_with_status(count, DeleteKeystoreStatus::Error) } -fn check_get_response<'a>( +fn check_keystore_get_response<'a>( response: &ListKeystoresResponse, expected_keystores: impl IntoIterator, ) { @@ -118,7 +132,7 @@ fn check_get_response<'a>( } } -fn check_import_response( +fn check_keystore_import_response( response: &ImportKeystoresResponse, expected_statuses: impl IntoIterator, ) { @@ -131,7 +145,7 @@ fn check_import_response( } } -fn check_delete_response<'a>( +fn check_keystore_delete_response<'a>( response: &DeleteKeystoresResponse, expected_statuses: impl IntoIterator, ) { @@ -144,6 +158,41 @@ fn check_delete_response<'a>( } } +fn check_remotekey_get_response( + response: &ListRemotekeysResponse, + expected_keystores: impl IntoIterator, +) { + for expected in expected_keystores { + 
assert!(response.data.contains(&expected)); + } +} + +fn check_remotekey_import_response( + response: &ImportRemotekeysResponse, + expected_statuses: impl IntoIterator, +) { + for (status, expected_status) in response.data.iter().zip_eq(expected_statuses) { + assert_eq!( + expected_status, status.status, + "message: {:?}", + status.message + ); + } +} + +fn check_remotekey_delete_response( + response: &DeleteRemotekeysResponse, + expected_statuses: impl IntoIterator, +) { + for (status, expected_status) in response.data.iter().zip_eq(expected_statuses) { + assert_eq!( + status.status, expected_status, + "message: {:?}", + status.message + ); + } +} + #[test] fn get_auth_no_token() { run_test(|mut tester| async move { @@ -189,11 +238,11 @@ fn import_new_keystores() { .unwrap(); // All keystores should be imported. - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // Check that GET lists all the imported keystores. let get_res = tester.client.get_keystores().await.unwrap(); - check_get_response(&get_res, &keystores); + check_keystore_get_response(&get_res, &keystores); }) } @@ -214,15 +263,15 @@ fn import_only_duplicate_keystores() { // All keystores should be imported on first import. let import_res = tester.client.post_keystores(&req).await.unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // No keystores should be imported on repeat import. let import_res = tester.client.post_keystores(&req).await.unwrap(); - check_import_response(&import_res, all_duplicate(keystores.len())); + check_keystore_import_response(&import_res, all_duplicate(keystores.len())); // Check that GET lists all the imported keystores. 
let get_res = tester.client.get_keystores().await.unwrap(); - check_get_response(&get_res, &keystores); + check_keystore_get_response(&get_res, &keystores); }) } @@ -262,7 +311,7 @@ fn import_some_duplicate_keystores() { }; let import_res = tester.client.post_keystores(&req1).await.unwrap(); - check_import_response(&import_res, all_imported(keystores1.len())); + check_keystore_import_response(&import_res, all_imported(keystores1.len())); // Check partial import. let expected = (0..num_keystores).map(|i| { @@ -273,7 +322,7 @@ fn import_some_duplicate_keystores() { } }); let import_res = tester.client.post_keystores(&req2).await.unwrap(); - check_import_response(&import_res, expected); + check_keystore_import_response(&import_res, expected); }) } @@ -323,7 +372,7 @@ fn get_web3_signer_keystores() { .unwrap(); // All keystores should be imported. - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // Add some web3signer validators. let remote_vals = (0..num_remote) @@ -391,14 +440,14 @@ fn import_and_delete_conflicting_web3_signer_keystores() { slashing_protection: None, }; let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_import_error(keystores.len())); + check_keystore_import_response(&import_res, all_import_error(keystores.len())); // Attempt to delete the web3signer validators, which should fail. let delete_req = DeleteKeystoresRequest { pubkeys: pubkeys.clone(), }; let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_delete_error(keystores.len())); + check_keystore_delete_response(&delete_res, all_delete_error(keystores.len())); // Get should still list all the validators as `readonly`. 
let get_res = tester.client.get_keystores().await.unwrap(); @@ -418,9 +467,9 @@ fn import_and_delete_conflicting_web3_signer_keystores() { .unwrap(); } let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_import_error(keystores.len())); + check_keystore_import_response(&import_res, all_import_error(keystores.len())); let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_delete_error(keystores.len())); + check_keystore_delete_response(&delete_res, all_delete_error(keystores.len())); }) } @@ -464,7 +513,7 @@ fn import_keystores_wrong_password() { ImportKeystoreStatus::Imported } }); - check_import_response(&import_res, expected_statuses); + check_keystore_import_response(&import_res, expected_statuses); // Import again with the correct passwords and check that the statuses are as expected. let correct_import_req = ImportKeystoresRequest { @@ -484,7 +533,7 @@ fn import_keystores_wrong_password() { ImportKeystoreStatus::Duplicate } }); - check_import_response(&import_res, expected_statuses); + check_keystore_import_response(&import_res, expected_statuses); // Import one final time, at which point all keys should be duplicates. let import_res = tester @@ -492,7 +541,7 @@ fn import_keystores_wrong_password() { .post_keystores(&correct_import_req) .await .unwrap(); - check_import_response( + check_keystore_import_response( &import_res, (0..num_keystores).map(|_| ImportKeystoreStatus::Duplicate), ); @@ -528,11 +577,11 @@ fn import_invalid_slashing_protection() { .unwrap(); // All keystores should be imported. - check_import_response(&import_res, all_import_error(keystores.len())); + check_keystore_import_response(&import_res, all_import_error(keystores.len())); // Check that GET lists none of the failed keystores. 
let get_res = tester.client.get_keystores().await.unwrap(); - check_get_response(&get_res, &[]); + check_keystore_get_response(&get_res, &[]); }) } @@ -669,7 +718,7 @@ fn generic_migration_test( }) .await .unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // Sign attestations on VC1. for (validator_index, mut attestation) in first_vc_attestations { @@ -694,7 +743,7 @@ fn generic_migration_test( }) .await .unwrap(); - check_delete_response(&delete_res, all_deleted(delete_indices.len())); + check_keystore_delete_response(&delete_res, all_deleted(delete_indices.len())); // Check that slashing protection data was returned for all selected validators. assert_eq!( @@ -745,7 +794,7 @@ fn generic_migration_test( }) .await .unwrap(); - check_import_response(&import_res, all_imported(import_indices.len())); + check_keystore_import_response(&import_res, all_imported(import_indices.len())); // Sign attestations on the second VC. for (validator_index, mut attestation, should_succeed) in second_vc_attestations { @@ -779,18 +828,18 @@ fn delete_keystores_twice() { slashing_protection: None, }; let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // 2. Delete all. let delete_req = DeleteKeystoresRequest { pubkeys: keystores.iter().map(keystore_pubkey).collect(), }; let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_deleted(keystores.len())); + check_keystore_delete_response(&delete_res, all_deleted(keystores.len())); // 3. Delete again. 
let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_not_active(keystores.len())); + check_keystore_delete_response(&delete_res, all_not_active(keystores.len())); }) } @@ -808,7 +857,7 @@ fn delete_nonexistent_keystores() { pubkeys: keystores.iter().map(keystore_pubkey).collect(), }; let delete_res = tester.client.delete_keystores(&delete_req).await.unwrap(); - check_delete_response(&delete_res, all_not_found(keystores.len())); + check_keystore_delete_response(&delete_res, all_not_found(keystores.len())); }) } @@ -868,7 +917,7 @@ fn delete_concurrent_with_signing() { }) .await .unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // Start several threads signing attestations at sequential epochs. let mut join_handles = vec![]; @@ -936,8 +985,7 @@ fn delete_concurrent_with_signing() { for interchange in collected_slashing_protection .into_iter() - .map(Result::unwrap) - .flatten() + .flat_map(Result::unwrap) { for validator_data in interchange.data { slashing_protection_map @@ -972,7 +1020,7 @@ fn delete_then_reimport() { slashing_protection: None, }; let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); // 2. Delete all. let delete_res = tester @@ -982,10 +1030,770 @@ fn delete_then_reimport() { }) .await .unwrap(); - check_delete_response(&delete_res, all_deleted(keystores.len())); + check_keystore_delete_response(&delete_res, all_deleted(keystores.len())); // 3. 
Re-import let import_res = tester.client.post_keystores(&import_req).await.unwrap(); - check_import_response(&import_res, all_imported(keystores.len())); + check_keystore_import_response(&import_res, all_imported(keystores.len())); + }) +} + +#[test] +fn get_empty_remotekeys() { + run_test(|tester| async move { + let _ = &tester; + let res = tester.client.get_remotekeys().await.unwrap(); + assert_eq!(res, ListRemotekeysResponse { data: vec![] }); + }) +} + +#[test] +fn import_new_remotekeys() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekeys. + let remotekeys = (0..3) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + + // All keystores should be imported. + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Check list response. + let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_same_remotekey_different_url() { + run_test(|tester| async move { + let _ = &tester; + + // Create two remotekeys with different urls. + let remotekey1 = new_remotekey_validator().1; + let mut remotekey2 = remotekey1.clone(); + remotekey2.url = "http://localhost:1/this-url-hopefully-does-also-not-exist".into(); + let remotekeys = vec![remotekey1, remotekey2]; + + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + + // Both remotekeys have the same public key and therefore only the first one should be imported. 
+ check_remotekey_import_response( + &import_res, + vec![ + ImportRemotekeyStatus::Imported, + ImportRemotekeyStatus::Duplicate, + ] + .into_iter(), + ); + + // Only first key is imported and should be returned. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response( + &get_res, + vec![SingleListRemotekeysResponse { + pubkey: remotekeys[0].pubkey, + url: remotekeys[0].url.clone(), + readonly: false, + }], + ); + }) +} + +#[test] +fn delete_remotekey_then_reimport_different_url() { + run_test(|tester| async move { + let _ = &tester; + + // Create two remotekeys with different urls. + let mut remotekey = new_remotekey_validator().1; + let remotekeys = vec![remotekey.clone()]; + + // Import and Delete remotekey. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + vec![ImportRemotekeyStatus::Imported].into_iter(), + ); + let delete_req = DeleteRemotekeysRequest { + pubkeys: remotekeys.iter().map(|k| k.pubkey).collect(), + }; + let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap(); + check_remotekey_delete_response( + &delete_res, + all_with_status(remotekeys.len(), DeleteRemotekeyStatus::Deleted), + ); + + // Change remotekey url. + remotekey.url = "http://localhost:1/this-url-hopefully-does-also-not-exist".into(); + let remotekeys = vec![remotekey.clone()]; + + // Reimport remotekey. 
+ let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + vec![ImportRemotekeyStatus::Imported].into_iter(), + ); + }) +} + +#[test] +fn import_only_duplicate_remotekeys() { + run_test(|tester| async move { + let _ = &tester; + let remotekeys = (0..3) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // All remotekeys should be imported on first import. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // No remotekeys should be imported on repeat import. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Duplicate), + ); + + // Check list response. + let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_some_duplicate_remotekeys() { + run_test(|tester| async move { + let _ = &tester; + let num_remotekeys = 5; + let remotekeys_all = (0..num_remotekeys) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Select even numbered keystores. + let remotekeys_even = remotekeys_all + .iter() + .enumerate() + .filter_map(|(i, remotekey)| { + if i % 2 == 0 { + Some(remotekey.clone()) + } else { + None + } + }) + .collect::>(); + + // Only import every second remotekey. 
+ let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys_even.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys_even.len(), ImportRemotekeyStatus::Imported), + ); + + let expected = (0..num_remotekeys).map(|i| { + if i % 2 == 0 { + ImportRemotekeyStatus::Duplicate + } else { + ImportRemotekeyStatus::Imported + } + }); + + // Try to import all keys. Every second import should be a duplicate. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys_all.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response(&import_res, expected); + + // Check list response. + let expected_responses = remotekeys_all + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_remote_and_local_keys() { + run_test(|tester| async move { + let _ = &tester; + let num_local = 3; + let num_remote = 2; + + // Generate local keystores. + let password = random_password_string(); + let keystores = (0..num_local) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + // Import keystores. + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_keystore_import_response( + &import_res, + all_with_status(keystores.len(), ImportKeystoreStatus::Imported), + ); + + // Add some remotekey validators. 
+ let remotekeys = (0..num_remote) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + + // All remotekeys should be imported. + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Check that only remote validators are returned. + let get_res = tester.client.get_keystores().await.unwrap(); + let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleKeystoreResponse { + validating_pubkey: remotekey.pubkey, + derivation_path: None, + readonly: Some(true), + }) + .collect::>(); + for response in expected_responses { + assert!(get_res.data.contains(&response), "{:?}", response); + } + }) +} + +#[test] +fn import_same_local_and_remote_keys() { + run_test(|tester| async move { + let _ = &tester; + let num_local = 3; + + // Generate local keystores. + let password = random_password_string(); + let keystores = (0..num_local) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + // Generate remotekeys with same pubkey as local keystores. + let mut remotekeys = Vec::new(); + for keystore in keystores.iter() { + remotekeys.push(remotekey_validator_with_pubkey( + keystore.public_key().unwrap(), + )); + } + + // Import keystores. + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_keystore_import_response( + &import_res, + all_with_status(keystores.len(), ImportKeystoreStatus::Imported), + ); + + // Try to import remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + + // All remotekey import should fail. 
Already imported as local keystore. + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Error), + ); + + // Check that only local keystores are returned. + let get_res = tester.client.get_keystores().await.unwrap(); + let expected_responses = keystores + .iter() + .map(|local_keystore| SingleKeystoreResponse { + validating_pubkey: keystore_pubkey(local_keystore), + derivation_path: local_keystore.path(), + readonly: None, + }) + .collect::>(); + for response in expected_responses { + assert!(get_res.data.contains(&response), "{:?}", response); + } + }) +} +#[test] +fn import_same_remote_and_local_keys() { + run_test(|tester| async move { + let _ = &tester; + let num_local = 3; + + // Generate local keystores. + let password = random_password_string(); + let keystores = (0..num_local) + .map(|_| new_keystore(password.clone())) + .collect::>(); + + // Generate remotekeys with same pubkey as local keystores. + let mut remotekeys = Vec::new(); + for keystore in keystores.iter() { + remotekeys.push(remotekey_validator_with_pubkey( + keystore.public_key().unwrap(), + )); + } + + // Import remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + + // All remotekeys should be imported. + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Try to import local keystores. + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All local keystore imports should fail. Already imported as remotekeys. + check_keystore_import_response( + &import_res, + all_with_status(keystores.len(), ImportKeystoreStatus::Error), + ); + + // Check that only remotekeys are returned. 
+ let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn delete_remotekeys_twice() { + run_test(|tester| async move { + let _ = &tester; + + // Generate some remotekeys. + let remotekeys = (0..2) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Import all remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Delete all. + let delete_req = DeleteRemotekeysRequest { + pubkeys: remotekeys.iter().map(|k| k.pubkey).collect(), + }; + let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap(); + check_remotekey_delete_response( + &delete_res, + all_with_status(remotekeys.len(), DeleteRemotekeyStatus::Deleted), + ); + + // Try to delete again. + let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap(); + check_remotekey_delete_response( + &delete_res, + all_with_status(remotekeys.len(), DeleteRemotekeyStatus::NotFound), + ); + + // Check list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, Vec::new()); + }) +} + +#[test] +fn delete_nonexistent_remotekey() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekeys. + let remotekeys = (0..2) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Try to delete remotekeys. 
+ let delete_req = DeleteRemotekeysRequest { + pubkeys: remotekeys.iter().map(|k| k.pubkey).collect(), + }; + let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap(); + check_remotekey_delete_response( + &delete_res, + all_with_status(remotekeys.len(), DeleteRemotekeyStatus::NotFound), + ); + + // Check list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, Vec::new()); + }) +} + +#[test] +fn delete_then_reimport_remotekeys() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekeys. + let mut remotekeys = (0..2) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Import all remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Delete all. + let delete_req = DeleteRemotekeysRequest { + pubkeys: remotekeys.iter().map(|k| k.pubkey).collect(), + }; + let delete_res = tester.client.delete_remotekeys(&delete_req).await.unwrap(); + check_remotekey_delete_response( + &delete_res, + all_with_status(remotekeys.len(), DeleteRemotekeyStatus::Deleted), + ); + + // Change remote key url + for rk in remotekeys.iter_mut() { + rk.url = "http://localhost:1/this-url-hopefully-does-also-not-exist".into(); + } + + // Re-import + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + // Check list response. 
+ let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .collect::>(); + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_remotekey_web3signer() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekeys. + let remotekeys = (0..2) + .map(|_| new_remotekey_validator().1) + .collect::>(); + + // Generate web3signers. + let web3signers = (0..2) + .map(|_| new_web3signer_validator().1) + .collect::>(); + + // Import web3signers. + tester + .client + .post_lighthouse_validators_web3signer(&web3signers) + .await + .unwrap(); + + // Import remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: remotekeys.clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(remotekeys.len(), ImportRemotekeyStatus::Imported), + ); + + let expected_responses = remotekeys + .iter() + .map(|remotekey| SingleListRemotekeysResponse { + pubkey: remotekey.pubkey, + url: remotekey.url.clone(), + readonly: false, + }) + .chain( + web3signers + .iter() + .map(|websigner| SingleListRemotekeysResponse { + pubkey: websigner.voting_public_key.compress(), + url: websigner.url.clone(), + readonly: false, + }), + ) + .collect::>(); + + // Check remotekey list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_remotekey_web3signer_disabled() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekey. + let (kp, remotekey_req) = new_remotekey_validator(); + + // Generate web3signer with same PK. + let mut web3signer_req = web3signer_validator_with_pubkey(kp.pk); + web3signer_req.enable = false; + + // Import web3signers. 
+ let _ = tester + .client + .post_lighthouse_validators_web3signer(&vec![web3signer_req]) + .await + .unwrap(); + + // 1 validator imported. + assert_eq!(tester.vals_total(), 1); + assert_eq!(tester.vals_enabled(), 0); + + // Import remotekeys. + let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: vec![remotekey_req.clone()].clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(1, ImportRemotekeyStatus::Imported), + ); + + // Still only one validator. Web3signer is overwritten by remotekey. + assert_eq!(tester.vals_total(), 1); + assert_eq!(tester.vals_enabled(), 1); + + // Remotekey overwrites web3signer. + let expected_responses = vec![SingleListRemotekeysResponse { + pubkey: remotekey_req.pubkey, + url: remotekey_req.url.clone(), + readonly: false, + }]; + + // Check remotekey list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); + }) +} + +#[test] +fn import_remotekey_web3signer_enabled() { + run_test(|tester| async move { + let _ = &tester; + + // Generate remotekey. + let (kp, remotekey_req) = new_remotekey_validator(); + + // Generate web3signer with same PK. + let mut web3signer_req = web3signer_validator_with_pubkey(kp.pk); + web3signer_req.url = "http://localhost:1/this-url-hopefully-does-also-not-exist".into(); + web3signer_req.enable = true; + + // Import web3signers. + tester + .client + .post_lighthouse_validators_web3signer(&vec![web3signer_req.clone()]) + .await + .unwrap(); + + // 1 validator imported. + assert_eq!(tester.vals_total(), 1); + assert_eq!(tester.vals_enabled(), 1); + let vals = tester.initialized_validators.read(); + let web3_vals = vals.validator_definitions().clone(); + + // Import remotekeys. 
+ let import_res = tester + .client + .post_remotekeys(&ImportRemotekeysRequest { + remote_keys: vec![remotekey_req.clone()].clone(), + }) + .await + .unwrap(); + check_remotekey_import_response( + &import_res, + all_with_status(1, ImportRemotekeyStatus::Duplicate), + ); + + assert_eq!(tester.vals_total(), 1); + assert_eq!(tester.vals_enabled(), 1); + let vals = tester.initialized_validators.read(); + let remote_vals = vals.validator_definitions().clone(); + + // Web3signer should not be overwritten since it is enabled. + assert!(web3_vals == remote_vals); + + // Remotekey should not be imported. + let expected_responses = vec![SingleListRemotekeysResponse { + pubkey: web3signer_req.voting_public_key.compress(), + url: web3signer_req.url.clone(), + readonly: false, + }]; + + // Check remotekey list response. + let get_res = tester.client.get_remotekeys().await.unwrap(); + check_remotekey_get_response(&get_res, expected_responses); }) } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index a4dedf16b2..0d5d4ad76e 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -14,12 +14,11 @@ use account_utils::{ }, ZeroizeString, }; -use eth2::lighthouse_vc::std_types::DeleteKeystoreStatus; use eth2_keystore::Keystore; use lighthouse_metrics::set_gauge; use lockfile::{Lockfile, LockfileError}; use parking_lot::{MappedMutexGuard, Mutex, MutexGuard}; -use reqwest::{Certificate, Client, Error as ReqwestError}; +use reqwest::{Certificate, Client, Error as ReqwestError, Identity}; use slog::{debug, error, info, warn, Logger}; use std::collections::{HashMap, HashSet}; use std::fs::{self, File}; @@ -89,9 +88,14 @@ pub enum Error { /// Unable to read the root certificate file for the remote signer. InvalidWeb3SignerRootCertificateFile(io::Error), InvalidWeb3SignerRootCertificate(ReqwestError), + /// Unable to read the client certificate for the remote signer. 
+ MissingWeb3SignerClientIdentityCertificateFile, + MissingWeb3SignerClientIdentityPassword, + InvalidWeb3SignerClientIdentityCertificateFile(io::Error), + InvalidWeb3SignerClientIdentityCertificate(ReqwestError), UnableToBuildWeb3SignerClient(ReqwestError), - /// Unable to apply an action to a validator because it is using a remote signer. - InvalidActionOnRemoteValidator, + /// Unable to apply an action to a validator. + InvalidActionOnValidator, } impl From for Error { @@ -239,6 +243,8 @@ impl InitializedValidator { url, root_certificate_path, request_timeout_ms, + client_identity_path, + client_identity_password, } => { let signing_url = build_web3_signer_url(&url, &def.voting_public_key) .map_err(|e| Error::InvalidWeb3SignerUrl(e.to_string()))?; @@ -255,6 +261,20 @@ impl InitializedValidator { builder }; + let builder = if let Some(path) = client_identity_path { + let identity = load_pkcs12_identity( + path, + &client_identity_password + .ok_or(Error::MissingWeb3SignerClientIdentityPassword)?, + )?; + builder.identity(identity) + } else { + if client_identity_password.is_some() { + return Err(Error::MissingWeb3SignerClientIdentityCertificateFile); + } + builder + }; + let http_client = builder .build() .map_err(Error::UnableToBuildWeb3SignerClient)?; @@ -295,6 +315,19 @@ pub fn load_pem_certificate>(pem_path: P) -> Result>( + pkcs12_path: P, + password: &str, +) -> Result { + let mut buf = Vec::new(); + File::open(&pkcs12_path) + .map_err(Error::InvalidWeb3SignerClientIdentityCertificateFile)? 
+ .read_to_end(&mut buf) + .map_err(Error::InvalidWeb3SignerClientIdentityCertificateFile)?; + Identity::from_pkcs12_der(&buf, password) + .map_err(Error::InvalidWeb3SignerClientIdentityCertificate) +} + fn build_web3_signer_url(base_url: &str, voting_public_key: &PublicKey) -> Result { Url::parse(base_url)?.join(&format!("api/v1/eth2/sign/{}", voting_public_key)) } @@ -443,7 +476,8 @@ impl InitializedValidators { pub async fn delete_definition_and_keystore( &mut self, pubkey: &PublicKey, - ) -> Result { + is_local_keystore: bool, + ) -> Result<(), Error> { // 1. Disable the validator definition. // // We disable before removing so that in case of a crash the auto-discovery mechanism @@ -454,16 +488,19 @@ impl InitializedValidators { .iter_mut() .find(|def| &def.voting_public_key == pubkey) { - if def.signing_definition.is_local_keystore() { + // Update definition for local keystore + if def.signing_definition.is_local_keystore() && is_local_keystore { def.enabled = false; self.definitions .save(&self.validators_dir) .map_err(Error::UnableToSaveDefinitions)?; + } else if !def.signing_definition.is_local_keystore() && !is_local_keystore { + def.enabled = false; } else { - return Err(Error::InvalidActionOnRemoteValidator); + return Err(Error::InvalidActionOnValidator); } } else { - return Ok(DeleteKeystoreStatus::NotFound); + return Err(Error::ValidatorNotInitialized(pubkey.clone())); } // 2. Delete from `self.validators`, which holds the signing method. @@ -491,7 +528,7 @@ impl InitializedValidators { .save(&self.validators_dir) .map_err(Error::UnableToSaveDefinitions)?; - Ok(DeleteKeystoreStatus::Deleted) + Ok(()) } /// Attempt to delete the voting keystore file, or its entire validator directory. 
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index c58ac25f1f..43f88b54f0 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -400,6 +400,7 @@ impl ProductionValidatorClient { .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) + .private_tx_proposals(config.private_tx_proposals) .build()?; let attestation_service = AttestationServiceBuilder::new() @@ -497,7 +498,7 @@ impl ProductionValidatorClient { self.http_api_listen_addr = if self.config.http_api.enabled { let ctx = Arc::new(http_api::Context { - runtime: self.context.executor.runtime(), + task_executor: self.context.executor.clone(), api_secret, validator_store: Some(self.validator_store.clone()), validator_dir: Some(self.config.validator_dir.clone()), diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index ad04717cc2..b4b6caa05d 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -199,7 +199,8 @@ impl PreparationService { .map_err(|e| { error!( log, - "{}", format!("Error loading fee-recipient file: {:?}", e); + "Error loading fee-recipient file"; + "error" => ?e ); }) .unwrap_or(()); @@ -213,44 +214,39 @@ impl PreparationService { all_pubkeys .into_iter() .filter_map(|pubkey| { - let validator_index = self.validator_store.validator_index(&pubkey); - if let Some(validator_index) = validator_index { - let fee_recipient = if let Some(from_validator_defs) = - self.validator_store.suggested_fee_recipient(&pubkey) - { - // If there is a `suggested_fee_recipient` in the validator definitions yaml - // file, use that value. - Some(from_validator_defs) - } else { - // If there's nothing in the validator defs file, check the fee recipient - // file. 
- fee_recipient_file - .as_ref() - .and_then(|f| match f.get_fee_recipient(&pubkey) { - Ok(f) => f, - Err(_e) => None, - }) - // If there's nothing in the file, try the process-level default value. - .or(self.fee_recipient) - }; + // Ignore fee recipients for keys without indices, they are inactive. + let validator_index = self.validator_store.validator_index(&pubkey)?; - if let Some(fee_recipient) = fee_recipient { - Some(ProposerPreparationData { - validator_index, - fee_recipient, - }) - } else { - if spec.bellatrix_fork_epoch.is_some() { - error!( - log, - "Validator is missing fee recipient"; - "msg" => "update validator_definitions.yml", - "pubkey" => ?pubkey - ); - } - None - } + // If there is a `suggested_fee_recipient` in the validator definitions yaml + // file, use that value. + let fee_recipient = self + .validator_store + .suggested_fee_recipient(&pubkey) + .or_else(|| { + // If there's nothing in the validator defs file, check the fee + // recipient file. + fee_recipient_file + .as_ref()? + .get_fee_recipient(&pubkey) + .ok()? + }) + // If there's nothing in the file, try the process-level default value. + .or(self.fee_recipient); + + if let Some(fee_recipient) = fee_recipient { + Some(ProposerPreparationData { + validator_index, + fee_recipient, + }) } else { + if spec.bellatrix_fork_epoch.is_some() { + error!( + log, + "Validator is missing fee recipient"; + "msg" => "update validator_definitions.yml", + "pubkey" => ?pubkey + ); + } None } }) diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 3c12ac1e62..0daefc43c4 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -33,9 +33,9 @@ pub enum Error { } /// Enumerates all messages that can be signed by a validator. 
-pub enum SignableMessage<'a, T: EthSpec> { +pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload = FullPayload> { RandaoReveal(Epoch), - BeaconBlock(&'a BeaconBlock), + BeaconBlock(&'a BeaconBlock), AttestationData(&'a AttestationData), SignedAggregateAndProof(&'a AggregateAndProof), SelectionProof(Slot), @@ -47,7 +47,7 @@ pub enum SignableMessage<'a, T: EthSpec> { SignedContributionAndProof(&'a ContributionAndProof), } -impl<'a, T: EthSpec> SignableMessage<'a, T> { +impl<'a, T: EthSpec, Payload: ExecPayload> SignableMessage<'a, T, Payload> { /// Returns the `SignedRoot` for the contained message. /// /// The actual `SignedRoot` trait is not used since it also requires a `TreeHash` impl, which is @@ -113,9 +113,9 @@ impl SigningContext { impl SigningMethod { /// Return the signature of `signable_message`, with respect to the `signing_context`. - pub async fn get_signature( + pub async fn get_signature>( &self, - signable_message: SignableMessage<'_, T>, + signable_message: SignableMessage<'_, T, Payload>, signing_context: SigningContext, spec: &ChainSpec, executor: &TaskExecutor, diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index b632986c94..9ac1655cce 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -34,7 +34,7 @@ pub struct ForkInfo { #[derive(Debug, PartialEq, Serialize)] #[serde(bound = "T: EthSpec", rename_all = "snake_case")] -pub enum Web3SignerObject<'a, T: EthSpec> { +pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload> { AggregationSlot { slot: Slot, }, @@ -42,7 +42,7 @@ pub enum Web3SignerObject<'a, T: EthSpec> { Attestation(&'a AttestationData), BeaconBlock { version: ForkName, - block: &'a BeaconBlock, + block: &'a BeaconBlock, }, #[allow(dead_code)] Deposit { @@ -66,8 +66,8 @@ pub enum Web3SignerObject<'a, T: EthSpec> { ContributionAndProof(&'a ContributionAndProof), } -impl<'a, T: EthSpec> 
Web3SignerObject<'a, T> { - pub fn beacon_block(block: &'a BeaconBlock) -> Result { +impl<'a, T: EthSpec, Payload: ExecPayload> Web3SignerObject<'a, T, Payload> { + pub fn beacon_block(block: &'a BeaconBlock) -> Result { let version = match block { BeaconBlock::Base(_) => ForkName::Phase0, BeaconBlock::Altair(_) => ForkName::Altair, @@ -99,7 +99,7 @@ impl<'a, T: EthSpec> Web3SignerObject<'a, T> { #[derive(Debug, PartialEq, Serialize)] #[serde(bound = "T: EthSpec")] -pub struct SigningRequest<'a, T: EthSpec> { +pub struct SigningRequest<'a, T: EthSpec, Payload: ExecPayload> { #[serde(rename = "type")] pub message_type: MessageType, #[serde(skip_serializing_if = "Option::is_none")] @@ -107,7 +107,7 @@ pub struct SigningRequest<'a, T: EthSpec> { #[serde(rename = "signingRoot")] pub signing_root: Hash256, #[serde(flatten)] - pub object: Web3SignerObject<'a, T>, + pub object: Web3SignerObject<'a, T, Payload>, } #[derive(Debug, PartialEq, Deserialize)] diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 3f4a01faaa..b39ef9ef83 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -18,10 +18,11 @@ use std::sync::Arc; use task_executor::TaskExecutor; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, Address, AggregateAndProof, - Attestation, BeaconBlock, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, - Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedContributionAndProof, Slot, SyncAggregatorSelectionData, - SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, + Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, + EthSpec, ExecPayload, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, + Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, Slot, + 
SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, + SyncSelectionProof, SyncSubnetId, }; use validator_dir::ValidatorDir; @@ -170,6 +171,8 @@ impl ValidatorStore { /// - Adding the validator definition to the YAML file, saving it to the filesystem. /// - Enabling the validator with the slashing protection database. /// - If `enable == true`, starting to perform duties for the validator. + // FIXME: ignore this clippy lint until the validator store is refactored to use async locks + #[allow(clippy::await_holding_lock)] pub async fn add_validator( &self, validator_def: ValidatorDefinition, @@ -338,7 +341,7 @@ impl ValidatorStore { let signing_context = self.signing_context(Domain::Randao, signing_epoch); let signature = signing_method - .get_signature::( + .get_signature::>( SignableMessage::RandaoReveal(signing_epoch), signing_context, &self.spec, @@ -359,12 +362,12 @@ impl ValidatorStore { .suggested_fee_recipient(validator_pubkey) } - pub async fn sign_block( + pub async fn sign_block>( &self, validator_pubkey: PublicKeyBytes, - block: BeaconBlock, + block: BeaconBlock, current_slot: Slot, - ) -> Result, Error> { + ) -> Result, Error> { // Make sure the block slot is not higher than the current slot to avoid potential attacks. 
if block.slot() > current_slot { warn!( @@ -397,7 +400,7 @@ impl ValidatorStore { let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; let signature = signing_method - .get_signature( + .get_signature::( SignableMessage::BeaconBlock(&block), signing_context, &self.spec, @@ -466,7 +469,7 @@ impl ValidatorStore { Ok(Safe::Valid) => { let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; let signature = signing_method - .get_signature::( + .get_signature::>( SignableMessage::AttestationData(&attestation.data), signing_context, &self.spec, @@ -543,7 +546,7 @@ impl ValidatorStore { let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; let signature = signing_method - .get_signature( + .get_signature::>( SignableMessage::SignedAggregateAndProof(&message), signing_context, &self.spec, @@ -576,7 +579,7 @@ impl ValidatorStore { let signing_method = self.doppelganger_bypassed_signing_method(validator_pubkey)?; let signature = signing_method - .get_signature::( + .get_signature::>( SignableMessage::SelectionProof(slot), signing_context, &self.spec, @@ -615,7 +618,7 @@ impl ValidatorStore { }; let signature = signing_method - .get_signature::( + .get_signature::>( SignableMessage::SyncSelectionProof(&message), signing_context, &self.spec, @@ -641,7 +644,7 @@ impl ValidatorStore { let signing_method = self.doppelganger_bypassed_signing_method(*validator_pubkey)?; let signature = signing_method - .get_signature::( + .get_signature::>( SignableMessage::SyncCommitteeSignature { beacon_block_root, slot, @@ -686,7 +689,7 @@ impl ValidatorStore { }; let signature = signing_method - .get_signature( + .get_signature::>( SignableMessage::SignedContributionAndProof(&message), signing_context, &self.spec,