diff --git a/.github/custom/clippy.toml b/.github/custom/clippy.toml index df09502307..f50e35bcdf 100644 --- a/.github/custom/clippy.toml +++ b/.github/custom/clippy.toml @@ -18,4 +18,5 @@ async-wrapper-methods = [ "warp_utils::task::blocking_json_task", "validator_client::http_api::blocking_signed_json_task", "execution_layer::test_utils::MockServer::new", + "execution_layer::test_utils::MockServer::new_with_config", ] diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 13c1af7ab6..35032a0932 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -18,6 +18,9 @@ jobs: steps: - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install ganache run: npm install ganache@latest --global @@ -37,13 +40,29 @@ jobs: run: make && make install-lcli - name: Start local testnet - run: ./start_local_testnet.sh + run: ./start_local_testnet.sh && sleep 60 working-directory: scripts/local_testnet - name: Print logs - run: ./print_logs.sh + run: ./dump_logs.sh working-directory: scripts/local_testnet - name: Stop local testnet run: ./stop_local_testnet.sh working-directory: scripts/local_testnet + + - name: Clean-up testnet + run: ./clean.sh + working-directory: scripts/local_testnet + + - name: Start local testnet with blinded block production + run: ./start_local_testnet.sh -p && sleep 60 + working-directory: scripts/local_testnet + + - name: Print logs for blinded block testnet + run: ./dump_logs.sh + working-directory: scripts/local_testnet + + - name: Stop local testnet with blinded block production + run: ./stop_local_testnet.sh + working-directory: scripts/local_testnet diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index a58491d04f..f26eadc398 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -104,7 +104,7 @@ jobs: run: make test-op-pool debug-tests-ubuntu: name: 
debug-tests-ubuntu - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 needs: cargo-fmt steps: - uses: actions/checkout@v1 @@ -158,6 +158,18 @@ jobs: run: sudo npm install -g ganache - name: Run the beacon chain sim that starts from an eth1 contract run: cargo run --release --bin simulator eth1-sim + merge-transition-ubuntu: + name: merge-transition-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install ganache + run: sudo npm install -g ganache + - name: Run the beacon chain sim and go through the merge transition + run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: name: no-eth1-simulator-ubuntu runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 9830ef39be..ae9f83c46d 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,6 @@ perf.data* /bin genesis.ssz /clippy.toml + +# IntelliJ +/*.iml diff --git a/Cargo.lock b/Cargo.lock index d17370042b..1447a8e4e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,15 +141,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" +checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" [[package]] name = "arbitrary" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e0a02cf12f1b1f48b14cb7f8217b876d09992b39c816ffb3b1ba64dd979a87" +checksum = "5a7924531f38b1970ff630f03eb20a2fde69db5c590c93b0f3482e95dcc5fd60" dependencies = [ "derive_arbitrary", ] @@ -178,6 +178,27 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +[[package]] +name = "async-stream" +version = "0.3.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.56" @@ -189,6 +210,17 @@ dependencies = [ "syn", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version 0.4.0", +] + [[package]] name = "asynchronous-codec" version = "0.6.0" @@ -224,6 +256,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7862e21c893d65a1650125d157eaeec691439379a1cee17ee49031b79236ada4" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "autocfg" version = "0.1.8" @@ -240,10 +284,55 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] -name = "backtrace" -version = "0.3.65" +name = "axum" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" +checksum = "6b9496f0c1d1afb7a2af4338bbe1d969cddfead41d87a9fb3aaa6d0bbc7af648" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa 1.0.2", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite 0.2.9", + "serde", + 
"serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4f44a0e6200e9d11a1cdc989e4b358f6e3d354fbf48478f345a17f4e43f8635" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", +] + +[[package]] +name = "backtrace" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" dependencies = [ "addr2line", "cc", @@ -268,9 +357,27 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "base64ct" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" +checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" + +[[package]] +name = "beacon-api-client" +version = "0.1.0" +source = "git+https://github.com/ralexstokes/beacon-api-client#061c1b1bb1f18bcd7cf23d4cd375f99c78d5a2a5" +dependencies = [ + "ethereum-consensus", + "http", + "itertools", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", + "url", +] [[package]] name = "beacon_chain" @@ -324,11 +431,12 @@ dependencies = [ "tokio", "tree_hash", "types", + "unused_port", ] [[package]] name = "beacon_node" -version = "2.3.2-rc.0" +version = "2.5.0" dependencies = [ "beacon_chain", "clap", @@ -407,9 +515,9 @@ dependencies = [ [[package]] name = "bitvec" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty 
2.0.0", "radium 0.7.0", @@ -472,9 +580,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c521c26a784d5c4bcd98d483a7d3518376e9ff1efbcfa9e2d456ab8183752303" +checksum = "6a30d0edd9dd1c60ddb42b80341c7852f6f985279a5c1a83659dcb65899dec99" dependencies = [ "cc", "glob", @@ -485,7 +593,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.3.2-rc.0" +version = "2.5.0" dependencies = [ "beacon_node", "clap", @@ -568,9 +676,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "f0b3de4a0c5e67e16066a0715723abd91edc2f9001d09c46e1dca929351e130e" dependencies = [ "serde", ] @@ -613,12 +721,9 @@ dependencies = [ [[package]] name = "cast" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" -dependencies = [ - "rustc_version 0.4.0", -] +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" @@ -643,9 +748,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b72a433d0cf2aef113ba70f62634c56fddb0f244e6377185c56a7cadbd8f91" +checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", "cipher", @@ -655,9 +760,9 @@ dependencies = [ [[package]] name = "chacha20poly1305" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3b84ed6d1d5f7aa9bdde921a5090e0ca4d934d250ea3b402a5fab3a994e28a2a" +checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ "aead", "chacha20", @@ -762,7 +867,7 @@ dependencies = [ "slot_clock", "store", "task_executor", - "time 0.3.9", + "time 0.3.11", "timer", "tokio", "types", @@ -865,9 +970,9 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", @@ -891,9 +996,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" dependencies = [ "cast", "itertools", @@ -901,9 +1006,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if", "crossbeam-utils", @@ -911,9 +1016,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -922,9 +1027,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" +checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" dependencies = [ "autocfg 1.1.0", "cfg-if", @@ -936,9 +1041,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.9" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" +checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" dependencies = [ "cfg-if", "once_cell", @@ -964,9 +1069,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", @@ -1029,7 +1134,7 @@ version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" dependencies = [ - "nix 0.24.1", + "nix 0.24.2", "winapi", ] @@ -1190,9 +1295,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8728db27dd9033a7456655aaeb35fde74425d0f130b4cb18a19171ef38a1b454" +checksum = "c9a577516173adb681466d517d39bd468293bc2c2a16439375ef0f35bba45f3d" dependencies = [ "proc-macro2", "quote", @@ -1400,9 +1505,9 @@ dependencies = [ [[package]] name = "either" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" [[package]] name = "elliptic-curve" @@ -1703,6 +1808,7 @@ version = "0.4.1" dependencies = 
[ "eth2_ssz_derive", "ethereum-types 0.12.1", + "itertools", "smallvec", ] @@ -1816,6 +1922,27 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "ethereum-consensus" +version = "0.1.0" +source = "git+https://github.com/ralexstokes/ethereum-consensus#592eb44dc24403cc9d152f4b96683ab551533201" +dependencies = [ + "async-stream", + "blst", + "enr", + "hex", + "integer-sqrt", + "multiaddr 0.14.0", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.9.9", + "ssz-rs", + "thiserror", + "tokio", + "tokio-stream", +] + [[package]] name = "ethereum-types" version = "0.12.1" @@ -1865,14 +1992,54 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "ethers-providers" +version = "0.6.0" +source = "git+https://github.com/gakonst/ethers-rs?rev=02ad93a1cfb7b62eb051c77c61dc4c0218428e4a#02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" +dependencies = [ + "async-trait", + "auto_impl", + "base64", + "ethers-core", + "futures-channel", + "futures-core", + "futures-timer", + "futures-util", + "hex", + "http", + "once_cell", + "parking_lot 0.11.2", + "pin-project 1.0.11", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-tungstenite 0.17.2", + "tracing", + "tracing-futures", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-timer", + "web-sys", + "ws_stream_wasm", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" dependencies = [ + "deposit_contract", "environment", + "ethers-core", + "ethers-providers", "execution_layer", "exit-future", + "fork_choice", "futures", + "hex", + "reqwest", "sensitive_url", "serde_json", "task_executor", @@ -1894,14 +2061,17 @@ dependencies = [ "eth2_serde_utils", "eth2_ssz", "eth2_ssz_types", + "ethereum-consensus", "ethers-core", "exit-future", + "fork_choice", "futures", "hex", "jsonwebtoken", "lazy_static", "lighthouse_metrics", "lru", + "mev-build-rs", "parking_lot 0.12.1", "rand 0.8.5", "reqwest", @@ -1910,6 +2080,7 @@ dependencies = [ "serde_json", "slog", "slot_clock", + "ssz-rs", 
"state_processing", "task_executor", "tempfile", @@ -1951,9 +2122,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -2017,9 +2188,9 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" @@ -2061,6 +2232,7 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "proto_array", + "state_processing", "store", "tokio", "types", @@ -2272,9 +2444,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" [[package]] name = "git-version" @@ -2362,9 +2534,12 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash", +] [[package]] name = "hashlink" @@ -2519,6 +2694,12 @@ dependencies = [ "pin-project-lite 0.2.9", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + 
[[package]] name = "http_api" version = "0.1.0" @@ -2540,6 +2721,7 @@ dependencies = [ "lru", "network", "parking_lot 0.12.1", + "proto_array", "safe_arith", "sensitive_url", "serde", @@ -2553,6 +2735,7 @@ dependencies = [ "tokio-stream", "tree_hash", "types", + "unused_port", "warp", "warp_utils", ] @@ -2598,9 +2781,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.19" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -2620,6 +2803,19 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +dependencies = [ + "http", + "hyper", + "rustls 0.20.6", + "tokio", + "tokio-rustls 0.23.4", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -2743,12 +2939,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c6392766afd7964e2531940894cffe4bd8d7d17dbc3c1c4857040fd4b33bdb3" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg 1.1.0", - "hashbrown 0.12.1", + "hashbrown 0.12.3", ] [[package]] @@ -2758,6 +2954,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] @@ -2819,9 +3018,9 @@ checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "js-sys" -version = "0.3.58" +version = "0.3.59" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" +checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" dependencies = [ "wasm-bindgen", ] @@ -2903,7 +3102,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.3.2-rc.0" +version = "2.5.0" dependencies = [ "account_utils", "bls", @@ -2927,6 +3126,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "snap", "state_processing", "tree_hash", "types", @@ -3042,7 +3242,7 @@ dependencies = [ "libp2p-yamux", "multiaddr 0.14.0", "parking_lot 0.12.1", - "pin-project 1.0.10", + "pin-project 1.0.11", "rand 0.7.3", "smallvec", ] @@ -3068,7 +3268,7 @@ dependencies = [ "multihash 0.14.0", "multistream-select 0.10.4", "parking_lot 0.11.2", - "pin-project 1.0.10", + "pin-project 1.0.11", "prost 0.9.0", "prost-build 0.9.0", "rand 0.8.5", @@ -3103,7 +3303,7 @@ dependencies = [ "multihash 0.16.2", "multistream-select 0.11.0", "parking_lot 0.12.1", - "pin-project 1.0.10", + "pin-project 1.0.11", "prost 0.10.4", "prost-build 0.10.4", "rand 0.8.5", @@ -3182,9 +3382,9 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4357140141ba9739eee71b20aa735351c0fc642635b2bffc7f57a6b5c1090" +checksum = "564a7e5284d7d9b3140fdfc3cb6567bc32555e86a21de5604c2ec85da05cf384" dependencies = [ "libp2p-core 0.33.0", "libp2p-gossipsub", @@ -3263,7 +3463,7 @@ dependencies = [ "instant", "libp2p-core 0.33.0", "log", - "pin-project 1.0.10", + "pin-project 1.0.11", "rand 0.7.3", "smallvec", "thiserror", @@ -3331,9 +3531,9 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" 
+checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", "base64", @@ -3401,7 +3601,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.3.2-rc.0" +version = "2.5.0" dependencies = [ "account_manager", "account_utils", @@ -3506,9 +3706,9 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lock_api" @@ -3550,11 +3750,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84e6fe5655adc6ce00787cf7dcaf8dc4f998a0565d23eafc207a8b08ca3349a" +checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" dependencies = [ - "hashbrown 0.11.2", + "hashbrown 0.12.3", ] [[package]] @@ -3619,6 +3819,12 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +[[package]] +name = "matchit" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" + [[package]] name = "mdbx-sys" version = "0.11.6-4" @@ -3658,6 +3864,22 @@ dependencies = [ "safe_arith", ] +[[package]] +name = "mev-build-rs" +version = "0.2.0" +source = "git+https://github.com/ralexstokes/mev-rs?tag=v0.2.0#921fa3f7c3497839461964a5297dfe4f2cef3136" +dependencies = [ + "async-trait", + "axum", + "beacon-api-client", + "ethereum-consensus", + "serde", + "serde_json", + "ssz-rs", + "thiserror", + "tracing", +] + [[package]] name = "milagro_bls" version = "1.4.2" @@ -3703,9 +3925,9 @@ dependencies = [ [[package]] name = "mio" -version = 
"0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", @@ -3856,7 +4078,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.10", + "pin-project 1.0.11", "smallvec", "unsigned-varint 0.7.1", ] @@ -3870,7 +4092,7 @@ dependencies = [ "bytes", "futures", "log", - "pin-project 1.0.10", + "pin-project 1.0.11", "smallvec", "unsigned-varint 0.7.1", ] @@ -3951,9 +4173,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" +checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" dependencies = [ "bitflags", "cfg-if", @@ -3967,6 +4189,7 @@ dependencies = [ "beacon_node", "environment", "eth2", + "execution_layer", "sensitive_url", "tempfile", "types", @@ -4077,18 +4300,18 @@ dependencies = [ [[package]] name = "object" -version = "0.28.4" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" +checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" [[package]] name = "oorandom" @@ -4104,9 +4327,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.40" +version = "0.10.41" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" +checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" dependencies = [ "bitflags", "cfg-if", @@ -4136,18 +4359,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.20.0+1.1.1o" +version = "111.22.0+1.1.1q" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92892c4f87d56e376e469ace79f1128fdaded07646ddf73aa0be4706ff712dec" +checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.74" +version = "0.9.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835363342df5fba8354c5b453325b110ffd54044e588c539cf2f20a8014e4cb1" +checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" dependencies = [ "autocfg 1.1.0", "cc", @@ -4162,7 +4385,7 @@ name = "operation_pool" version = "0.2.0" dependencies = [ "beacon_chain", - "bitvec 1.0.0", + "bitvec 1.0.1", "derivative", "eth2_ssz", "eth2_ssz_derive", @@ -4209,7 +4432,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" dependencies = [ "arrayvec", - "bitvec 1.0.0", + "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", "parity-scale-codec-derive 3.1.3", @@ -4320,9 +4543,9 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9a3b09a20e374558580a4914d3b7d89bd61b954a5a5e1dcbea98753addb1947" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" dependencies = [ "base64", ] @@ -4335,10 +4558,11 @@ checksum = 
"d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pest" -version = "2.1.3" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "69486e2b8c2d2aeb9762db7b4e00b0331156393555cff467f4163ff06821eef8" dependencies = [ + "thiserror", "ucd-trie", ] @@ -4353,28 +4577,38 @@ dependencies = [ ] [[package]] -name = "pin-project" -version = "0.4.29" +name = "pharos" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ - "pin-project-internal 0.4.29", + "futures", + "rustc_version 0.4.0", ] [[package]] name = "pin-project" -version = "1.0.10" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" dependencies = [ - "pin-project-internal 1.0.10", + "pin-project-internal 0.4.30", +] + +[[package]] +name = "pin-project" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +dependencies = [ + "pin-project-internal 1.0.11", ] [[package]] name = "pin-project-internal" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" +checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" dependencies = [ "proc-macro2", "quote", @@ -4383,9 +4617,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" dependencies = [ "proc-macro2", "quote", @@ -4445,9 +4679,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" +checksum = "9428003b84df1496fb9d6eeee9c5f8145cb41ca375eb0dad204328888832811f" dependencies = [ "num-traits", "plotters-backend", @@ -4458,15 +4692,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" +checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" [[package]] name = "plotters-svg" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" +checksum = "e0918736323d1baff32ee0eade54984f6f201ad7e97d5cfb5d6ab4a358529615" dependencies = [ "plotters-backend", ] @@ -4568,9 +4802,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.39" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" +checksum = "c278e965f1d8cf32d6e0e96de3d3e79712178ae67986d9cf9151f51e95aac89b" dependencies = [ "unicode-ident", ] @@ -4825,21 +5059,21 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" dependencies = [ "proc-macro2", ] [[package]] name = "r2d2" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "scheduled-thread-pool", ] @@ -4971,9 +5205,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -4991,9 +5225,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.6" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -5011,9 +5245,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.26" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "remove_dir_all" @@ -5039,6 +5273,7 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-rustls", "hyper-tls", "ipnet", "js-sys", @@ -5048,17 +5283,21 @@ dependencies = [ "native-tls", "percent-encoding", "pin-project-lite 0.2.9", + "rustls 0.20.6", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", 
"tokio-native-tls", + "tokio-rustls 0.23.4", "tokio-util 0.7.3", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.10.1", ] @@ -5192,7 +5431,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.10", + "semver 1.0.12", ] [[package]] @@ -5221,10 +5460,19 @@ dependencies = [ ] [[package]] -name = "rustversion" -version = "1.0.6" +name = "rustls-pemfile" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +dependencies = [ + "base64", +] + +[[package]] +name = "rustversion" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8" [[package]] name = "rw-stream-sink" @@ -5233,7 +5481,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ "futures", - "pin-project 0.4.29", + "pin-project 0.4.30", "static_assertions", ] @@ -5244,7 +5492,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" dependencies = [ "futures", - "pin-project 1.0.10", + "pin-project 1.0.11", "static_assertions", ] @@ -5419,9 +5667,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41d061efea015927ac527063765e73601444cdc344ba855bc7bd44578b25e1c" +checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" [[package]] name = "semver-parser" @@ -5438,6 +5686,12 @@ dependencies = [ 
"pest", ] +[[package]] +name = "send_wrapper" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" + [[package]] name = "sensitive_url" version = "0.1.0" @@ -5448,9 +5702,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.137" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "fc855a42c7967b7c369eb5860f7164ef1f6f81c20c7cc1141f2a604e18723b03" dependencies = [ "serde_derive", ] @@ -5477,9 +5731,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "6f2122636b9fe3b81f1cb25099fcf2d3f542cdb1d45940d56c713158884a05da" dependencies = [ "proc-macro2", "quote", @@ -5488,9 +5742,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" dependencies = [ "itoa 1.0.2", "ryu", @@ -5544,9 +5798,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.24" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ "indexmap", "ryu", @@ -5667,7 +5921,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.9", + "time 0.3.11", ] [[package]] @@ -5690,9 +5944,12 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.6" +version = "0.4.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg 1.1.0", +] [[package]] name = "slasher" @@ -5787,7 +6044,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.9", + "time 0.3.11", ] [[package]] @@ -5832,7 +6089,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.9", + "time 0.3.11", ] [[package]] @@ -5871,9 +6128,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" [[package]] name = "snap" @@ -5949,6 +6206,31 @@ dependencies = [ "der 0.5.1", ] +[[package]] +name = "ssz-rs" +version = "0.8.0" +source = "git+https://github.com/ralexstokes/ssz-rs#bd7cfb5a836e28747e6ce5e570234d14df0b24f7" +dependencies = [ + "bitvec 1.0.1", + "hex", + "lazy_static", + "num-bigint", + "serde", + "sha2 0.9.9", + "ssz-rs-derive", + "thiserror", +] + +[[package]] +name = "ssz-rs-derive" +version = "0.8.0" +source = "git+https://github.com/ralexstokes/ssz-rs#bd7cfb5a836e28747e6ce5e570234d14df0b24f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -6046,9 +6328,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.24.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef" +checksum = "4faebde00e8ff94316c01800f9054fd2ba77d30d9e922541913051d1d978918b" dependencies = [ "heck 0.4.0", "proc-macro2", @@ -6088,15 +6370,21 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.96" +version = "1.0.98" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" +checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "synstructure" version = "0.12.6" @@ -6257,9 +6545,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" +checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217" dependencies = [ "itoa 1.0.2", "libc", @@ -6339,10 +6627,11 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.19.2" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" +checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" dependencies = [ + "autocfg 1.1.0", "bytes", "libc", "memchr", @@ -6399,6 +6688,17 @@ dependencies = [ "webpki 0.21.4", ] +[[package]] +name = "tokio-rustls" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls 0.20.6", + "tokio", + "webpki 0.22.0", +] + [[package]] name = "tokio-stream" version = "0.1.9" @@ -6419,9 +6719,25 @@ checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ "futures-util", "log", - "pin-project 1.0.10", + "pin-project 1.0.11", "tokio", - "tungstenite", + "tungstenite 0.14.0", +] + +[[package]] +name = 
"tokio-tungstenite" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" +dependencies = [ + "futures-util", + "log", + "rustls 0.20.6", + "tokio", + "tokio-rustls 0.23.4", + "tungstenite 0.17.3", + "webpki 0.22.0", + "webpki-roots", ] [[package]] @@ -6463,6 +6779,47 @@ dependencies = [ "serde", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project 1.0.11", + "pin-project-lite 0.2.9", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite 0.2.9", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.2" @@ -6484,9 +6841,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", @@ -6495,14 +6852,24 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" +checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" dependencies = [ "once_cell", "valuable", ] +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project 1.0.11", + "tracing", +] + [[package]] name = "tracing-log" version = "0.1.3" @@ -6516,13 +6883,13 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.11" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" +checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b" dependencies = [ "ansi_term", - "lazy_static", "matchers", + "once_cell", "regex", "sharded-slab", "smallvec", @@ -6645,6 +7012,27 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tungstenite" +version = "0.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" +dependencies = [ + "base64", + "byteorder", + "bytes", + "http", + "httparse", + "log", + "rand 0.8.5", + "rustls 0.20.6", + "sha-1 0.10.0", + "thiserror", + "url", + "utf-8", + "webpki 0.22.0", +] + [[package]] name = "twoway" version = "0.1.8" @@ -6710,9 +7098,9 @@ dependencies = [ [[package]] name = "ucd-trie" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" +checksum = "89570599c4fe5585de2b388aab47e99f7fa4e9238a1399f707a02e356058141c" [[package]] name = "uint" @@ -6750,15 +7138,15 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.1" +version = "1.0.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" +checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" dependencies = [ "tinyvec", ] @@ -6986,15 +7374,15 @@ dependencies = [ "mime_guess", "multipart", "percent-encoding", - "pin-project 1.0.10", + "pin-project 1.0.11", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "tokio-stream", - "tokio-tungstenite", + "tokio-tungstenite 0.15.0", "tokio-util 0.6.10", "tower-service", "tracing", @@ -7038,9 +7426,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" +checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -7048,13 +7436,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" +checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -7063,9 +7451,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.31" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "de9a9cec1733468a8c657e57fa2413d2ae2c0129b95e87c5b72b8ace4d13f31f" +checksum = "fa76fb221a1f8acddf5b54ace85912606980ad661ac7a503b4570ffd3a624dad" dependencies = [ "cfg-if", "js-sys", @@ -7075,9 +7463,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" +checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7085,9 +7473,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" +checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" dependencies = [ "proc-macro2", "quote", @@ -7098,15 +7486,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" +checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" [[package]] name = "wasm-bindgen-test" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b30cf2cba841a812f035c40c50f53eb9c56181192a9dd2c71b65e6a87a05ba" +checksum = "513df541345bb9fcc07417775f3d51bbb677daf307d8035c0afafd87dc2e6599" dependencies = [ "console_error_panic_hook", "js-sys", @@ -7118,9 +7506,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad594bf33e73cafcac2ae9062fc119d4f75f9c77e25022f91c9a64bd5b6463" +checksum = 
"6150d36a03e90a3cf6c12650be10626a9902d70c5270fd47d7a47e5389a10d56" dependencies = [ "proc-macro2", "quote", @@ -7143,9 +7531,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.58" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" +checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1" dependencies = [ "js-sys", "wasm-bindgen", @@ -7172,7 +7560,7 @@ dependencies = [ "log", "once_cell", "parking_lot 0.12.1", - "pin-project 1.0.10", + "pin-project 1.0.11", "reqwest", "rlp", "secp256k1", @@ -7245,9 +7633,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" +checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" dependencies = [ "webpki 0.22.0", ] @@ -7379,6 +7767,24 @@ dependencies = [ "winapi", ] +[[package]] +name = "ws_stream_wasm" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47ca1ab42f5afed7fc332b22b6e932ca5414b209465412c8cdf0ad23bc0de645" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "pharos", + "rustc_version 0.4.0", + "send_wrapper", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wyz" version = "0.2.0" @@ -7445,9 +7851,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.3" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ "zeroize_derive", ] diff --git a/Dockerfile b/Dockerfile index aa2853ce4f..86a69c6539 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ 
-FROM rust:1.58.1-bullseye AS builder +FROM rust:1.62.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES diff --git a/Makefile b/Makefile index a97637bfd1..53fd4143d9 100644 --- a/Makefile +++ b/Makefile @@ -146,10 +146,9 @@ lint: -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push -# FIXME: fails if --release is added due to broken HTTP API tests nightly-lint: cp .github/custom/clippy.toml . - cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests -- \ + cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \ -A clippy::all \ -D clippy::disallowed_from_async rm clippy.toml diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 4c7140df39..c581866a25 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -280,6 +280,8 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin password_opt, graffiti, suggested_fee_recipient, + None, + None, ) .map_err(|e| format!("Unable to create new validator definition: {:?}", e))?; diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index ccb145caf9..ef430c2bc3 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.3.2-rc.0" +version = "2.5.0" authors = ["Paul Hauner ", "Age Manning ; @@ -137,6 +140,9 @@ const MAX_PER_SLOT_FORK_CHOICE_DISTANCE: u64 = 4; pub const INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON: &str = "Justified block has an invalid execution payload."; +pub const INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON: &str = + "Finalized merge transition block is invalid."; + /// Defines the behaviour when a block/block-root for a skipped slot is requested. pub enum WhenSlotSkipped { /// If the slot is a skip slot, return `None`. 
@@ -528,6 +534,7 @@ impl BeaconChain { /// Even more efficient variant of `forwards_iter_block_roots` that will avoid cloning the head /// state if it isn't required for the requested range of blocks. + /// The range [start_slot, end_slot] is inclusive (ie `start_slot <= end_slot`) pub fn forwards_iter_block_roots_until( &self, start_slot: Slot, @@ -1292,23 +1299,28 @@ impl BeaconChain { epoch: Epoch, head_block_root: Hash256, ) -> Result<(Vec>, Hash256, ExecutionStatus), Error> { - self.with_committee_cache(head_block_root, epoch, |committee_cache, dependent_root| { - let duties = validator_indices - .iter() - .map(|validator_index| { - let validator_index = *validator_index as usize; - committee_cache.get_attestation_duties(validator_index) - }) - .collect(); + let execution_status = self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::AttestationHeadNotInForkChoice(head_block_root))?; - let execution_status = self - .canonical_head - .fork_choice_read_lock() - .get_block_execution_status(&head_block_root) - .ok_or(Error::AttestationHeadNotInForkChoice(head_block_root))?; + let (duties, dependent_root) = self.with_committee_cache( + head_block_root, + epoch, + |committee_cache, dependent_root| { + let duties = validator_indices + .iter() + .map(|validator_index| { + let validator_index = *validator_index as usize; + committee_cache.get_attestation_duties(validator_index) + }) + .collect(); - Ok((duties, dependent_root, execution_status)) - }) + Ok((duties, dependent_root)) + }, + )?; + Ok((duties, dependent_root, execution_status)) } /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. 
@@ -1377,10 +1389,41 @@ impl BeaconChain { pub fn get_aggregated_sync_committee_contribution( &self, sync_contribution_data: &SyncContributionData, - ) -> Option> { - self.naive_sync_aggregation_pool + ) -> Result>, Error> { + if let Some(contribution) = self + .naive_sync_aggregation_pool .read() .get(sync_contribution_data) + { + self.filter_optimistic_sync_committee_contribution(contribution) + .map(Option::Some) + } else { + Ok(None) + } + } + + fn filter_optimistic_sync_committee_contribution( + &self, + contribution: SyncCommitteeContribution, + ) -> Result, Error> { + let beacon_block_root = contribution.beacon_block_root; + match self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&beacon_block_root) + { + // The contribution references a block that is not in fork choice, it must be + // pre-finalization. + None => Err(Error::SyncContributionDataReferencesFinalizedBlock { beacon_block_root }), + // The contribution references a fully valid `beacon_block_root`. + Some(execution_status) if execution_status.is_valid_or_irrelevant() => Ok(contribution), + // The contribution references a block that has not been verified by an EL (i.e. it + // is optimistic or invalid). Don't return the block, return an error instead. + Some(execution_status) => Err(Error::HeadBlockNotFullyVerified { + beacon_block_root, + execution_status, + }), + } } /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`. @@ -1738,6 +1781,7 @@ impl BeaconChain { self.slot()?, verified.indexed_attestation(), AttestationFromBlock::False, + &self.spec, ) .map_err(Into::into) } @@ -2057,11 +2101,20 @@ impl BeaconChain { )?) } - /// Accept some attester slashing and queue it for inclusion in an appropriate block. + /// Accept a verified attester slashing and: + /// + /// 1. Apply it to fork choice. + /// 2. Add it to the op pool. pub fn import_attester_slashing( &self, attester_slashing: SigVerifiedOp>, ) { + // Add to fork choice. 
+ self.canonical_head + .fork_choice_write_lock() + .on_attester_slashing(attester_slashing.as_inner()); + + // Add to the op pool (if we have the ability to propose blocks). if self.eth1_chain.is_some() { self.op_pool.insert_attester_slashing( attester_slashing, @@ -2215,6 +2268,7 @@ impl BeaconChain { pub async fn process_chain_segment( self: &Arc, chain_segment: Vec>>, + count_unrealized: CountUnrealized, ) -> ChainSegmentResult { let mut imported_blocks = 0; @@ -2279,7 +2333,10 @@ impl BeaconChain { // Import the blocks into the chain. for signature_verified_block in signature_verified_blocks { - match self.process_block(signature_verified_block).await { + match self + .process_block(signature_verified_block, count_unrealized) + .await + { Ok(_) => imported_blocks += 1, Err(error) => { return ChainSegmentResult::Failed { @@ -2363,6 +2420,7 @@ impl BeaconChain { pub async fn process_block>( self: &Arc, unverified_block: B, + count_unrealized: CountUnrealized, ) -> Result> { // Start the Prometheus timer. 
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2378,7 +2436,7 @@ impl BeaconChain { let import_block = async move { let execution_pending = unverified_block.into_execution_pending_block(&chain)?; chain - .import_execution_pending_block(execution_pending) + .import_execution_pending_block(execution_pending, count_unrealized) .await }; @@ -2436,6 +2494,7 @@ impl BeaconChain { async fn import_execution_pending_block( self: Arc, execution_pending_block: ExecutionPendingBlock, + count_unrealized: CountUnrealized, ) -> Result> { let ExecutionPendingBlock { block, @@ -2494,6 +2553,7 @@ impl BeaconChain { state, confirmed_state_roots, payload_verification_status, + count_unrealized, ) }, "payload_verification_handle", @@ -2515,6 +2575,7 @@ impl BeaconChain { mut state: BeaconState, confirmed_state_roots: Vec, payload_verification_status: PayloadVerificationStatus, + count_unrealized: CountUnrealized, ) -> Result> { let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); @@ -2660,6 +2721,7 @@ impl BeaconChain { &state, payload_verification_status, &self.spec, + count_unrealized.and(self.config.count_unrealized.into()), ) .map_err(|e| BlockError::BeaconChainError(e.into()))?; } @@ -2670,6 +2732,11 @@ impl BeaconChain { .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), &state); let validator_monitor = self.validator_monitor.read(); + // Register each attester slashing in the block with fork choice. + for attester_slashing in block.body().attester_slashings() { + fork_choice.on_attester_slashing(attester_slashing); + } + // Register each attestation in the block with the fork choice service. 
for attestation in block.body().attestations() { let _fork_choice_attestation_timer = @@ -2685,6 +2752,7 @@ impl BeaconChain { current_slot, &indexed_attestation, AttestationFromBlock::True, + &self.spec, ) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. The @@ -2743,32 +2811,38 @@ impl BeaconChain { if !payload_verification_status.is_optimistic() && block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot { - let new_head_root = fork_choice - .get_head(current_slot, &self.spec) - .map_err(BeaconChainError::from)?; - - if new_head_root == block_root { - if let Some(proto_block) = fork_choice.get_block(&block_root) { - if let Err(e) = self.early_attester_cache.add_head_block( - block_root, - signed_block.clone(), - proto_block, - &state, - &self.spec, - ) { + match fork_choice.get_head(current_slot, &self.spec) { + // This block became the head, add it to the early attester cache. + Ok(new_head_root) if new_head_root == block_root => { + if let Some(proto_block) = fork_choice.get_block(&block_root) { + if let Err(e) = self.early_attester_cache.add_head_block( + block_root, + signed_block.clone(), + proto_block, + &state, + &self.spec, + ) { + warn!( + self.log, + "Early attester cache insert failed"; + "error" => ?e + ); + } + } else { warn!( self.log, - "Early attester cache insert failed"; - "error" => ?e + "Early attester block missing"; + "block_root" => ?block_root ); } - } else { - warn!( - self.log, - "Early attester block missing"; - "block_root" => ?block_root - ); } + // This block did not become the head, nothing to do. 
+ Ok(_) => (), + Err(e) => error!( + self.log, + "Failed to compute head during block import"; + "error" => ?e + ), } } @@ -2908,6 +2982,7 @@ impl BeaconChain { event_handler.register(EventKind::Block(SseBlock { slot, block: block_root, + execution_optimistic: payload_verification_status.is_optimistic(), })); } } @@ -3246,24 +3321,29 @@ impl BeaconChain { let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; - let pubkey_opt = state + let pubkey = state .validators() .get(proposer_index as usize) - .map(|v| v.pubkey); + .map(|v| v.pubkey) + .ok_or(BlockProductionError::BeaconChain( + BeaconChainError::ValidatorIndexUnknown(proposer_index as usize), + ))?; + + let builder_params = BuilderParams { + pubkey, + slot: state.slot(), + chain_health: self + .is_healthy() + .map_err(BlockProductionError::BeaconChain)?, + }; // If required, start the process of loading an execution payload from the EL early. This // allows it to run concurrently with things like attestation packing. let prepare_payload_handle = match &state { BeaconState::Base(_) | BeaconState::Altair(_) => None, BeaconState::Merge(_) => { - let finalized_checkpoint = self.canonical_head.cached_head().finalized_checkpoint(); - let prepare_payload_handle = get_execution_payload( - self.clone(), - &state, - finalized_checkpoint, - proposer_index, - pubkey_opt, - )?; + let prepare_payload_handle = + get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; Some(prepare_payload_handle) } }; @@ -3578,16 +3658,7 @@ impl BeaconChain { // Run fork choice since it's possible that the payload invalidation might result in a new // head. - // - // Don't return early though, since invalidating the justified checkpoint might cause an - // error here. 
- if let Err(e) = self.recompute_head_at_current_slot().await { - crit!( - self.log, - "Failed to run fork choice routine"; - "error" => ?e, - ); - } + self.recompute_head_at_current_slot().await; // Obtain the justified root from fork choice. // @@ -3917,11 +3988,15 @@ impl BeaconChain { // `execution_engine_forkchoice_lock` apart from the one here. let forkchoice_lock = execution_layer.execution_engine_forkchoice_lock().await; - let (head_block_root, head_hash, finalized_hash) = if let Some(head_hash) = params.head_hash + let (head_block_root, head_hash, justified_hash, finalized_hash) = if let Some(head_hash) = + params.head_hash { ( params.head_root, head_hash, + params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), params .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), @@ -3935,14 +4010,13 @@ impl BeaconChain { ForkName::Base | ForkName::Altair => return Ok(()), _ => { // We are post-bellatrix - if execution_layer + if let Some(payload_attributes) = execution_layer .payload_attributes(next_slot, params.head_root) .await - .is_some() { // We are a proposer, check for terminal_pow_block_hash if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec) + .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp) .await .map_err(Error::ForkchoiceUpdate)? 
{ @@ -3953,6 +4027,9 @@ impl BeaconChain { ( params.head_root, terminal_pow_block_hash, + params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), params .finalized_hash .unwrap_or_else(ExecutionBlockHash::zero), @@ -3970,7 +4047,13 @@ impl BeaconChain { }; let forkchoice_updated_response = execution_layer - .notify_forkchoice_updated(head_hash, finalized_hash, current_slot, head_block_root) + .notify_forkchoice_updated( + head_hash, + justified_hash, + finalized_hash, + current_slot, + head_block_root, + ) .await .map_err(Error::ExecutionForkChoiceUpdateFailed); @@ -4075,10 +4158,11 @@ impl BeaconChain { /// Returns the value of `execution_optimistic` for `block`. /// /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. - /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. - pub fn is_optimistic_block( + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or has + /// `ExecutionStatus::Invalid`. + pub fn is_optimistic_or_invalid_block>( &self, - block: &SignedBeaconBlock, + block: &SignedBeaconBlock, ) -> Result { // Check if the block is pre-Bellatrix. if self.slot_is_prior_to_bellatrix(block.slot()) { @@ -4086,7 +4170,7 @@ impl BeaconChain { } else { self.canonical_head .fork_choice_read_lock() - .is_optimistic_block(&block.canonical_root()) + .is_optimistic_or_invalid_block(&block.canonical_root()) .map_err(BeaconChainError::ForkChoiceError) } } @@ -4094,7 +4178,7 @@ impl BeaconChain { /// Returns the value of `execution_optimistic` for `head_block`. /// /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. - /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or `ExecutionStatus::Invalid`. 
/// /// This function will return an error if `head_block` is not present in the fork choice store /// and so should only be used on the head block or when the block *should* be present in the @@ -4102,9 +4186,9 @@ impl BeaconChain { /// /// There is a potential race condition when syncing where the block_root of `head_block` could /// be pruned from the fork choice store before being read. - pub fn is_optimistic_head_block( + pub fn is_optimistic_or_invalid_head_block>( &self, - head_block: &SignedBeaconBlock, + head_block: &SignedBeaconBlock, ) -> Result { // Check if the block is pre-Bellatrix. if self.slot_is_prior_to_bellatrix(head_block.slot()) { @@ -4112,7 +4196,7 @@ impl BeaconChain { } else { self.canonical_head .fork_choice_read_lock() - .is_optimistic_block_no_fallback(&head_block.canonical_root()) + .is_optimistic_or_invalid_block_no_fallback(&head_block.canonical_root()) .map_err(BeaconChainError::ForkChoiceError) } } @@ -4121,17 +4205,17 @@ impl BeaconChain { /// You can optionally provide `head_info` if it was computed previously. /// /// Returns `Ok(false)` if the head block is pre-Bellatrix, or has `ExecutionStatus::Valid`. - /// Returns `Ok(true)` if the head block has `ExecutionStatus::Optimistic`. + /// Returns `Ok(true)` if the head block has `ExecutionStatus::Optimistic` or `ExecutionStatus::Invalid`. /// /// There is a potential race condition when syncing where the block root of `head_info` could /// be pruned from the fork choice store before being read. 
- pub fn is_optimistic_head(&self) -> Result { + pub fn is_optimistic_or_invalid_head(&self) -> Result { self.canonical_head .head_execution_status() - .map(|status| status.is_optimistic()) + .map(|status| status.is_optimistic_or_invalid()) } - pub fn is_optimistic_block_root( + pub fn is_optimistic_or_invalid_block_root( &self, block_slot: Slot, block_root: &Hash256, @@ -4142,7 +4226,7 @@ impl BeaconChain { } else { self.canonical_head .fork_choice_read_lock() - .is_optimistic_block_no_fallback(block_root) + .is_optimistic_or_invalid_block_no_fallback(block_root) .map_err(BeaconChainError::ForkChoiceError) } } @@ -4220,14 +4304,7 @@ impl BeaconChain { } // Run fork choice and signal to any waiting task that it has completed. - if let Err(e) = self.recompute_head_at_current_slot().await { - error!( - self.log, - "Fork choice error at slot start"; - "error" => ?e, - "slot" => slot, - ); - } + self.recompute_head_at_current_slot().await; // Send the notification regardless of fork choice success, this is a "best effort" // notification and we don't want block production to hit the timeout in case of error. @@ -4507,6 +4584,74 @@ impl BeaconChain { .map(|duration| (fork_name, duration)) } + /// This method serves to get a sense of the current chain health. It is used in block proposal + /// to determine whether we should outsource payload production duties. + /// + /// Since we are likely calling this during the slot we are going to propose in, don't take into + /// account the current slot when accounting for skips. + pub fn is_healthy(&self) -> Result { + // Check if the merge has been finalized. 
+ if let Some(finalized_hash) = self + .canonical_head + .cached_head() + .forkchoice_update_parameters() + .finalized_hash + { + if ExecutionBlockHash::zero() == finalized_hash { + return Ok(ChainHealth::PreMerge); + } + } else { + return Ok(ChainHealth::PreMerge); + }; + + if self.config.builder_fallback_disable_checks { + return Ok(ChainHealth::Healthy); + } + + let current_slot = self.slot()?; + + // Check slots at the head of the chain. + let prev_slot = current_slot.saturating_sub(Slot::new(1)); + let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot()); + let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips; + + // Check if finalization is advancing. + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + let epochs_since_finalization = current_epoch.saturating_sub( + self.canonical_head + .cached_head() + .finalized_checkpoint() + .epoch, + ); + let finalization_check = epochs_since_finalization.as_usize() + <= self.config.builder_fallback_epochs_since_finalization; + + // Check skip slots in the last `SLOTS_PER_EPOCH`. + let start_slot = current_slot.saturating_sub(T::EthSpec::slots_per_epoch()); + let mut epoch_skips = 0; + for slot in start_slot.as_u64()..current_slot.as_u64() { + if self + .block_root_at_slot_skips_none(Slot::new(slot))? 
+ .is_none() + { + epoch_skips += 1; + } + } + let epoch_skips_check = epoch_skips <= self.config.builder_fallback_skips_per_epoch; + + if !head_skips_check { + Ok(ChainHealth::Unhealthy(FailedCondition::Skips)) + } else if !finalization_check { + Ok(ChainHealth::Unhealthy( + FailedCondition::EpochsSinceFinalization, + )) + } else if !epoch_skips_check { + Ok(ChainHealth::Unhealthy(FailedCondition::SkipsPerEpoch)) + } else { + Ok(ChainHealth::Healthy) + } + } + pub fn dump_as_dot(&self, output: &mut W) { let canonical_head_hash = self.canonical_head.cached_head().head_block_root(); let mut visited: HashSet = HashSet::new(); diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index c7663c77c4..4f6003fda1 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -8,6 +8,7 @@ use crate::{metrics, BeaconSnapshot}; use derivative::Derivative; use fork_choice::ForkChoiceStore; use ssz_derive::{Decode, Encode}; +use std::collections::BTreeSet; use std::marker::PhantomData; use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; @@ -155,7 +156,10 @@ pub struct BeaconForkChoiceStore, Cold: ItemStore< justified_checkpoint: Checkpoint, justified_balances: Vec, best_justified_checkpoint: Checkpoint, + unrealized_justified_checkpoint: Checkpoint, + unrealized_finalized_checkpoint: Checkpoint, proposer_boost_root: Hash256, + equivocating_indices: BTreeSet, _phantom: PhantomData, } @@ -201,7 +205,10 @@ where justified_balances: anchor_state.balances().clone().into(), finalized_checkpoint, best_justified_checkpoint: justified_checkpoint, + unrealized_justified_checkpoint: justified_checkpoint, + unrealized_finalized_checkpoint: finalized_checkpoint, proposer_boost_root: Hash256::zero(), + equivocating_indices: BTreeSet::new(), _phantom: PhantomData, } } @@ -216,7 +223,10 @@ where justified_checkpoint: 
self.justified_checkpoint, justified_balances: self.justified_balances.clone(), best_justified_checkpoint: self.best_justified_checkpoint, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, proposer_boost_root: self.proposer_boost_root, + equivocating_indices: self.equivocating_indices.clone(), } } @@ -233,7 +243,10 @@ where justified_checkpoint: persisted.justified_checkpoint, justified_balances: persisted.justified_balances, best_justified_checkpoint: persisted.best_justified_checkpoint, + unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, proposer_boost_root: persisted.proposer_boost_root, + equivocating_indices: persisted.equivocating_indices, _phantom: PhantomData, }) } @@ -280,6 +293,14 @@ where &self.finalized_checkpoint } + fn unrealized_justified_checkpoint(&self) -> &Checkpoint { + &self.unrealized_justified_checkpoint + } + + fn unrealized_finalized_checkpoint(&self) -> &Checkpoint { + &self.unrealized_finalized_checkpoint + } + fn proposer_boost_root(&self) -> Hash256 { self.proposer_boost_root } @@ -323,29 +344,51 @@ where self.best_justified_checkpoint = checkpoint } + fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint) { + self.unrealized_justified_checkpoint = checkpoint; + } + + fn set_unrealized_finalized_checkpoint(&mut self, checkpoint: Checkpoint) { + self.unrealized_finalized_checkpoint = checkpoint; + } + fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256) { self.proposer_boost_root = proposer_boost_root; } + + fn equivocating_indices(&self) -> &BTreeSet { + &self.equivocating_indices + } + + fn extend_equivocating_indices(&mut self, indices: impl IntoIterator) { + self.equivocating_indices.extend(indices); + } } /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. 
#[superstruct( - variants(V1, V7, V8), + variants(V1, V7, V8, V10, V11), variant_attributes(derive(Encode, Decode)), no_enum )] pub struct PersistedForkChoiceStore { #[superstruct(only(V1, V7))] pub balances_cache: BalancesCacheV1, - #[superstruct(only(V8))] + #[superstruct(only(V8, V10, V11))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec, pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V7, V8))] + #[superstruct(only(V10, V11))] + pub unrealized_justified_checkpoint: Checkpoint, + #[superstruct(only(V10, V11))] + pub unrealized_finalized_checkpoint: Checkpoint, + #[superstruct(only(V7, V8, V10, V11))] pub proposer_boost_root: Hash256, + #[superstruct(only(V11))] + pub equivocating_indices: BTreeSet, } -pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV8; +pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV11; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 07f8f7cc24..ec57fcabd9 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -44,7 +44,7 @@ //! ``` use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - PayloadNotifier, + AllowOptimisticImport, PayloadNotifier, }; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; @@ -335,17 +335,32 @@ pub enum ExecutionPayloadError { terminal_block_hash: ExecutionBlockHash, payload_parent_hash: ExecutionBlockHash, }, - /// The execution node failed to provide a parent block to a known block. This indicates an - /// issue with the execution node. 
+ /// The execution node is syncing but we fail the conditions for optimistic sync /// /// ## Peer scoring /// /// The peer is not necessarily invalid. - PoWParentMissing(ExecutionBlockHash), - /// The execution node is syncing but we fail the conditions for optimistic sync UnverifiedNonOptimisticCandidate, } +impl ExecutionPayloadError { + pub fn penalize_peer(&self) -> bool { + // This match statement should never have a default case so that we are + // always forced to consider here whether or not to penalize a peer when + // we add a new error condition. + match self { + ExecutionPayloadError::NoExecutionConnection => false, + ExecutionPayloadError::RequestFailed(_) => false, + ExecutionPayloadError::RejectedByExecutionEngine { .. } => true, + ExecutionPayloadError::InvalidPayloadTimestamp { .. } => true, + ExecutionPayloadError::InvalidTerminalPoWBlock { .. } => true, + ExecutionPayloadError::InvalidActivationEpoch { .. } => true, + ExecutionPayloadError::InvalidTerminalBlockHash { .. } => true, + ExecutionPayloadError::UnverifiedNonOptimisticCandidate => false, + } + } +} + impl From for ExecutionPayloadError { fn from(e: execution_layer::Error) -> Self { ExecutionPayloadError::RequestFailed(e) @@ -1184,7 +1199,7 @@ impl ExecutionPendingBlock { // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no // calls to remote servers. if is_valid_merge_transition_block { - validate_merge_block(&chain, block.message()).await?; + validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; }; // The specification declares that this should be run *inside* `per_block_processing`, @@ -1407,6 +1422,10 @@ fn check_block_against_finalized_slot( block_root: Hash256, chain: &BeaconChain, ) -> Result<(), BlockError> { + // The finalized checkpoint is being read from fork choice, rather than the cached head. 
+ // + // Fork choice has the most up-to-date view of finalization and there's no point importing a + // block which conflicts with the fork-choice view of finalization. let finalized_slot = chain .canonical_head .cached_head() diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index cef33ee4f7..252b7cef5a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -647,6 +647,7 @@ where store.clone(), Some(current_slot), &self.spec, + self.chain_config.count_unrealized.into(), )?; } diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index c02ddb8263..709382f05b 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -99,6 +99,8 @@ pub struct CachedHead { /// The `execution_payload.block_hash` of the block at the head of the chain. Set to `None` /// before Bellatrix. head_hash: Option, + /// The `execution_payload.block_hash` of the justified block. Set to `None` before Bellatrix. + justified_hash: Option, /// The `execution_payload.block_hash` of the finalized block. Set to `None` before Bellatrix. 
finalized_hash: Option, } @@ -183,6 +185,7 @@ impl CachedHead { ForkchoiceUpdateParameters { head_root: self.snapshot.beacon_block_root, head_hash: self.head_hash, + justified_hash: self.justified_hash, finalized_hash: self.finalized_hash, } } @@ -224,6 +227,7 @@ impl CanonicalHead { justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, head_hash: forkchoice_update_params.head_hash, + justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, }; @@ -272,6 +276,7 @@ impl CanonicalHead { justified_checkpoint: fork_choice_view.justified_checkpoint, finalized_checkpoint: fork_choice_view.finalized_checkpoint, head_hash: forkchoice_update_params.head_hash, + justified_hash: forkchoice_update_params.justified_hash, finalized_hash: forkchoice_update_params.finalized_hash, }; @@ -295,6 +300,23 @@ impl CanonicalHead { .ok_or(Error::HeadMissingFromForkChoice(head_block_root)) } + /// Returns a clone of the `CachedHead` and the execution status of the contained head block. + /// + /// This will only return `Err` in the scenario where `self.fork_choice` has advanced + /// significantly past the cached `head_snapshot`. In such a scenario it is likely prudent to + /// run `BeaconChain::recompute_head` to update the cached values. + pub fn head_and_execution_status( + &self, + ) -> Result<(CachedHead, ExecutionStatus), Error> { + let head = self.cached_head(); + let head_block_root = head.head_block_root(); + let execution_status = self + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::HeadMissingFromForkChoice(head_block_root))?; + Ok((head, execution_status)) + } + /// Returns a clone of `self.cached_head`. /// /// Takes a read-lock on `self.cached_head` for a short time (just long enough to clone it). 
@@ -412,9 +434,15 @@ impl BeaconChain { /// Execute the fork choice algorithm and enthrone the result as the canonical head. /// /// This method replaces the old `BeaconChain::fork_choice` method. - pub async fn recompute_head_at_current_slot(self: &Arc) -> Result<(), Error> { - let current_slot = self.slot()?; - self.recompute_head_at_slot(current_slot).await + pub async fn recompute_head_at_current_slot(self: &Arc) { + match self.slot() { + Ok(current_slot) => self.recompute_head_at_slot(current_slot).await, + Err(e) => error!( + self.log, + "No slot when recomputing head"; + "error" => ?e + ), + } } /// Execute the fork choice algorithm and enthrone the result as the canonical head. @@ -423,7 +451,13 @@ impl BeaconChain { /// different slot to the wall-clock can be useful for pushing fork choice into the next slot /// *just* before the start of the slot. This ensures that block production can use the correct /// head value without being delayed. - pub async fn recompute_head_at_slot(self: &Arc, current_slot: Slot) -> Result<(), Error> { + /// + /// This function purposefully does *not* return a `Result`. It's possible for fork choice to + /// fail to update if there is only one viable head and it has an invalid execution payload. In + /// such a case it's critical that the `BeaconChain` keeps importing blocks so that the + /// situation can be rectified. We avoid returning an error here so that calling functions + /// can't abort block import because an error is returned here. + pub async fn recompute_head_at_slot(self: &Arc, current_slot: Slot) { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); @@ -433,15 +467,15 @@ impl BeaconChain { move || chain.recompute_head_at_slot_internal(current_slot), "recompute_head_internal", ) - .await? + .await { // Fork choice returned successfully and did not need to update the EL. 
- Ok(None) => Ok(()), + Ok(Ok(None)) => (), // Fork choice returned successfully and needed to update the EL. It has returned a // join-handle from when it spawned some async tasks. We should await those tasks. - Ok(Some(join_handle)) => match join_handle.await { + Ok(Ok(Some(join_handle))) => match join_handle.await { // The async task completed successfully. - Ok(Some(())) => Ok(()), + Ok(Some(())) => (), // The async task did not complete successfully since the runtime is shutting down. Ok(None) => { debug!( @@ -449,7 +483,6 @@ impl BeaconChain { "Did not update EL fork choice"; "info" => "shutting down" ); - Err(Error::RuntimeShutdown) } // The async task did not complete successfully, tokio returned an error. Err(e) => { @@ -458,13 +491,24 @@ impl BeaconChain { "Did not update EL fork choice"; "error" => ?e ); - Err(Error::TokioJoin(e)) } }, // There was an error recomputing the head. - Err(e) => { + Ok(Err(e)) => { metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); - Err(e) + error!( + self.log, + "Error whilst recomputing head"; + "error" => ?e + ); + } + // There was an error spawning the task. 
+ Err(e) => { + error!( + self.log, + "Failed to spawn recompute head task"; + "error" => ?e + ); } } } @@ -612,6 +656,7 @@ impl BeaconChain { justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, head_hash: new_forkchoice_update_parameters.head_hash, + justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, }; @@ -638,6 +683,7 @@ impl BeaconChain { justified_checkpoint: new_view.justified_checkpoint, finalized_checkpoint: new_view.finalized_checkpoint, head_hash: new_forkchoice_update_parameters.head_hash, + justified_hash: new_forkchoice_update_parameters.justified_hash, finalized_hash: new_forkchoice_update_parameters.finalized_hash, }; @@ -706,6 +752,9 @@ impl BeaconChain { ) -> Result<(), Error> { let old_snapshot = &old_cached_head.snapshot; let new_snapshot = &new_cached_head.snapshot; + let new_head_is_optimistic = new_head_proto_block + .execution_status + .is_optimistic_or_invalid(); // Detect and potentially report any re-orgs. 
let reorg_distance = detect_reorg( @@ -791,6 +840,7 @@ impl BeaconChain { current_duty_dependent_root, previous_duty_dependent_root, epoch_transition: is_epoch_transition, + execution_optimistic: new_head_is_optimistic, })); } (Err(e), _) | (_, Err(e)) => { @@ -818,6 +868,7 @@ impl BeaconChain { new_head_block: new_snapshot.beacon_block_root, new_head_state: new_snapshot.beacon_state_root(), epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), + execution_optimistic: new_head_is_optimistic, })); } } @@ -834,6 +885,9 @@ impl BeaconChain { finalized_proto_block: ProtoBlock, ) -> Result<(), Error> { let new_snapshot = &new_cached_head.snapshot; + let finalized_block_is_optimistic = finalized_proto_block + .execution_status + .is_optimistic_or_invalid(); self.op_pool .prune_all(&new_snapshot.beacon_state, self.epoch()?); @@ -877,6 +931,7 @@ impl BeaconChain { // specific state root at the first slot of the finalized epoch (which // might be a skip slot). state: finalized_proto_block.state_root, + execution_optimistic: finalized_block_is_optimistic, })); } } @@ -1209,6 +1264,7 @@ fn observe_head_block_delays( let block_time_set_as_head = timestamp_now(); let head_block_root = head_block.root; let head_block_slot = head_block.slot; + let head_block_is_optimistic = head_block.execution_status.is_optimistic_or_invalid(); // Calculate the total delay between the start of the slot and when it was set as head. 
let block_delay_total = get_slot_delay_ms(block_time_set_as_head, head_block_slot, slot_clock); @@ -1301,6 +1357,7 @@ fn observe_head_block_delays( observed_delay: block_delays.observed, imported_delay: block_delays.imported, set_as_head_delay: block_delays.set_as_head, + execution_optimistic: head_block_is_optimistic, })); } } diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 36c2f41d9d..2c43ca53ed 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -24,6 +24,17 @@ pub struct ChainConfig { /// /// If set to 0 then block proposal will not wait for fork choice at all. pub fork_choice_before_proposal_timeout_ms: u64, + /// Number of skip slots in a row before the BN refuses to use connected builders during payload construction. + pub builder_fallback_skips: usize, + /// Number of skip slots in the past `SLOTS_PER_EPOCH` before the BN refuses to use connected + /// builders during payload construction. + pub builder_fallback_skips_per_epoch: usize, + /// Number of epochs since finalization before the BN refuses to use connected builders during + /// payload construction. + pub builder_fallback_epochs_since_finalization: usize, + /// Whether any chain health checks should be considered when deciding whether to use the builder API. + pub builder_fallback_disable_checks: bool, + pub count_unrealized: bool, } impl Default for ChainConfig { @@ -35,6 +46,12 @@ impl Default for ChainConfig { enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, + // Builder fallback configs that are set in `clap` will override these. 
+ builder_fallback_skips: 3, + builder_fallback_skips_per_epoch: 8, + builder_fallback_epochs_since_finalization: 3, + builder_fallback_disable_checks: false, + count_unrealized: false, } } } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 62b584968f..1ddbe13241 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -86,7 +86,7 @@ impl EarlyAttesterCache { /// /// - There is a cache `item` present. /// - If `request_slot` is in the same epoch as `item.epoch`. - /// - If `request_index` does not exceed `item.comittee_count`. + /// - If `request_index` does not exceed `item.committee_count`. pub fn try_attest( &self, request_slot: Slot, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index d3337dfafe..604fb6bea3 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -138,6 +138,7 @@ pub enum BeaconChainError { new_slot: Slot, }, AltairForkDisabled, + BuilderMissing, ExecutionLayerMissing, BlockVariantLacksExecutionPayload(Hash256), ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, execution_layer::Error), @@ -184,6 +185,9 @@ pub enum BeaconChainError { CannotAttestToFinalizedBlock { beacon_block_root: Hash256, }, + SyncContributionDataReferencesFinalizedBlock { + beacon_block_root: Hash256, + }, RuntimeShutdown, TokioJoin(tokio::task::JoinError), ProcessInvalidExecutionPayload(JoinError), diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 747b8a468d..3c530aaac8 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -7,11 +7,12 @@ //! So, this module contains functions that one might expect to find in other crates, but they live //! here for good reason. 
+use crate::otb_verification_service::OptimisticTransitionBlock; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::PayloadStatus; +use execution_layer::{BuilderParams, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -27,6 +28,12 @@ use types::*; pub type PreparePayloadResult = Result; pub type PreparePayloadHandle = JoinHandle>>; +#[derive(PartialEq)] +pub enum AllowOptimisticImport { + Yes, + No, +} + /// Used to await the result of executing payload with a remote EE. pub struct PayloadNotifier { pub chain: Arc>, @@ -146,6 +153,7 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( pub async fn validate_merge_block<'a, T: BeaconChainTypes>( chain: &Arc>, block: BeaconBlockRef<'a, T::EthSpec>, + allow_optimistic_import: AllowOptimisticImport, ) -> Result<(), BlockError> { let spec = &chain.spec; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -188,13 +196,18 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } .into()), None => { - if is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? { + if allow_optimistic_import == AllowOptimisticImport::Yes + && is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? 
+ { debug!( chain.log, - "Optimistically accepting terminal block"; + "Optimistically importing merge transition block"; "block_hash" => ?execution_payload.parent_hash(), "msg" => "the terminal block/parent was unavailable" ); + // Store Optimistic Transition Block in Database for later Verification + OptimisticTransitionBlock::from_block(block) + .persist_in_store::(&chain.store)?; Ok(()) } else { Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()) @@ -302,14 +315,12 @@ pub fn get_execution_payload< >( chain: Arc>, state: &BeaconState, - finalized_checkpoint: Checkpoint, proposer_index: u64, - pubkey: Option, + builder_params: BuilderParams, ) -> Result, BlockProductionError> { // Compute all required values from the `state` now to avoid needing to pass it into a spawned // task. let spec = &chain.spec; - let slot = state.slot(); let current_epoch = state.current_epoch(); let is_merge_transition_complete = is_merge_transition_complete(state); let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; @@ -326,14 +337,12 @@ pub fn get_execution_payload< async move { prepare_execution_payload::( &chain, - slot, is_merge_transition_complete, timestamp, random, - finalized_checkpoint, proposer_index, - pubkey, latest_execution_payload_header_block_hash, + builder_params, ) .await }, @@ -361,20 +370,18 @@ pub fn get_execution_payload< #[allow(clippy::too_many_arguments)] pub async fn prepare_execution_payload( chain: &Arc>, - slot: Slot, is_merge_transition_complete: bool, timestamp: u64, random: Hash256, - finalized_checkpoint: Checkpoint, proposer_index: u64, - pubkey: Option, latest_execution_payload_header_block_hash: ExecutionBlockHash, + builder_params: BuilderParams, ) -> Result where T: BeaconChainTypes, Payload: ExecPayload + Default, { - let current_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; let 
execution_layer = chain .execution_layer @@ -393,7 +400,7 @@ where } let terminal_pow_block_hash = execution_layer - .get_terminal_pow_block_hash(spec) + .get_terminal_pow_block_hash(spec, timestamp) .await .map_err(BlockProductionError::TerminalPoWBlockLookupFailed)?; @@ -408,44 +415,24 @@ where latest_execution_payload_header_block_hash }; - // Try to obtain the finalized proto block from fork choice. + // Try to obtain the fork choice update parameters from the cached head. // - // Use a blocking task to interact with the `fork_choice` lock otherwise we risk blocking the + // Use a blocking task to interact with the `canonical_head` lock otherwise we risk blocking the // core `tokio` executor. let inner_chain = chain.clone(); - let finalized_proto_block = chain + let forkchoice_update_params = chain .spawn_blocking_handle( move || { inner_chain .canonical_head - .fork_choice_read_lock() - .get_block(&finalized_checkpoint.root) + .cached_head() + .forkchoice_update_parameters() }, - "prepare_execution_payload_finalized_hash", + "prepare_execution_payload_forkchoice_update_params", ) .await .map_err(BlockProductionError::BeaconChain)?; - // The finalized block hash is not included in the specification, however we provide this - // parameter so that the execution layer can produce a payload id if one is not already known - // (e.g., due to a recent reorg). - let finalized_block_hash = if let Some(block) = finalized_proto_block { - block.execution_status.block_hash() - } else { - chain - .store - .get_blinded_block(&finalized_checkpoint.root) - .map_err(BlockProductionError::FailedToReadFinalizedBlock)? - .ok_or(BlockProductionError::MissingFinalizedBlock( - finalized_checkpoint.root, - ))? - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()) - }; - // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. // // This future is not executed here, it's up to the caller to await it. 
@@ -454,10 +441,10 @@ where parent_hash, timestamp, random, - finalized_block_hash.unwrap_or_else(ExecutionBlockHash::zero), proposer_index, - pubkey, - slot, + forkchoice_update_params, + builder_params, + &chain.spec, ) .await .map_err(BlockProductionError::GetPayloadFailed)?; diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index fc89429d3f..1d2787d985 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,5 +1,5 @@ use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::{ForkChoice, PayloadVerificationStatus}; +use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus}; use itertools::process_results; use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; @@ -99,6 +99,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It store: Arc>, current_slot: Option, spec: &ChainSpec, + count_unrealized_config: CountUnrealized, ) -> Result, E>, String> { // Fetch finalized block. let finalized_checkpoint = head_state.finalized_checkpoint(); @@ -163,7 +164,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; let mut state = finalized_snapshot.beacon_state; - for block in blocks { + let blocks_len = blocks.len(); + for (i, block) in blocks.into_iter().enumerate() { complete_state_advance(&mut state, None, block.slot(), spec) .map_err(|e| format!("State advance failed: {:?}", e))?; @@ -183,6 +185,15 @@ pub fn reset_fork_choice_to_finalization, Cold: It // This scenario is so rare that it seems OK to double-verify some blocks. let payload_verification_status = PayloadVerificationStatus::Optimistic; + // Because we are replaying a single chain of blocks, we only need to calculate unrealized + // justification for the last block in the chain. 
+ let is_last_block = i + 1 == blocks_len; + let count_unrealized = if is_last_block { + count_unrealized_config + } else { + CountUnrealized::False + }; + fork_choice .on_block( block.slot(), @@ -193,6 +204,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It &state, payload_verification_status, spec, + count_unrealized, ) .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index b82b690d20..ed6c2459eb 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -15,11 +15,12 @@ mod early_attester_cache; mod errors; pub mod eth1_chain; pub mod events; -mod execution_payload; +pub mod execution_payload; pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; +pub mod merge_readiness; mod metrics; pub mod migrate; mod naive_aggregation_pool; @@ -27,6 +28,7 @@ mod observed_aggregates; mod observed_attesters; mod observed_block_producers; pub mod observed_operations; +pub mod otb_verification_service; mod persisted_beacon_chain; mod persisted_fork_choice; mod pre_finalization_cache; @@ -43,7 +45,8 @@ mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + CountUnrealized, ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; @@ -56,7 +59,7 @@ pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBl pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; -pub use 
fork_choice::ExecutionStatus; +pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; pub use parking_lot; pub use slot_clock; diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs new file mode 100644 index 0000000000..4a7b38bdb4 --- /dev/null +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -0,0 +1,186 @@ +//! Provides tools for checking if a node is ready for the Bellatrix upgrade and following merge +//! transition. + +use crate::{BeaconChain, BeaconChainTypes}; +use serde::{Deserialize, Serialize, Serializer}; +use std::fmt; +use std::fmt::Write; +use types::*; + +/// The time before the Bellatrix fork when we will start issuing warnings about preparation. +const SECONDS_IN_A_WEEK: u64 = 604800; +pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK; + +#[derive(Default, Debug, Serialize, Deserialize)] +pub struct MergeConfig { + #[serde(serialize_with = "serialize_uint256")] + pub terminal_total_difficulty: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub terminal_block_hash: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub terminal_block_hash_epoch: Option, +} + +impl fmt::Display for MergeConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.terminal_block_hash.is_none() + && self.terminal_block_hash_epoch.is_none() + && self.terminal_total_difficulty.is_none() + { + return write!( + f, + "Merge terminal difficulty parameters not configured, check your config" + ); + } + let mut display_string = String::new(); + if let Some(terminal_total_difficulty) = self.terminal_total_difficulty { + write!( + display_string, + "terminal_total_difficulty: {},", + terminal_total_difficulty + )?; + } + if let Some(terminal_block_hash) = self.terminal_block_hash { + write!( + display_string, + "terminal_block_hash: {},", + terminal_block_hash + )?; + } + if let 
Some(terminal_block_hash_epoch) = self.terminal_block_hash_epoch { + write!( + display_string, + "terminal_block_hash_epoch: {},", + terminal_block_hash_epoch + )?; + } + write!(f, "{}", display_string.trim_end_matches(','))?; + Ok(()) + } +} +impl MergeConfig { + /// Instantiate `self` from the values in a `ChainSpec`. + pub fn from_chainspec(spec: &ChainSpec) -> Self { + let mut params = MergeConfig::default(); + if spec.terminal_total_difficulty != Uint256::max_value() { + params.terminal_total_difficulty = Some(spec.terminal_total_difficulty); + } + if spec.terminal_block_hash != ExecutionBlockHash::zero() { + params.terminal_block_hash = Some(spec.terminal_block_hash); + } + if spec.terminal_block_hash_activation_epoch != Epoch::max_value() { + params.terminal_block_hash_epoch = Some(spec.terminal_block_hash_activation_epoch); + } + params + } +} + +/// Indicates if a node is ready for the Bellatrix upgrade and subsequent merge transition. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] +pub enum MergeReadiness { + /// The node is ready, as far as we can tell. + Ready { + config: MergeConfig, + #[serde(serialize_with = "serialize_uint256")] + current_difficulty: Option, + }, + /// The transition configuration with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration. + ExchangeTransitionConfigurationFailed { error: String }, + /// The EL can be reached and has the correct configuration, however it's not yet synced. + NotSynced, + /// The user has not configured this node to use an execution endpoint. + NoExecutionEndpoint, +} + +impl fmt::Display for MergeReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MergeReadiness::Ready { + config: params, + current_difficulty, + } => { + write!( + f, + "This node appears ready for the merge. 
\ + Params: {}, current_difficulty: {:?}", + params, current_difficulty + ) + } + MergeReadiness::ExchangeTransitionConfigurationFailed { error } => write!( + f, + "Could not confirm the transition configuration with the \ + execution endpoint: {:?}", + error + ), + MergeReadiness::NotSynced => write!( + f, + "The execution endpoint is connected and configured, \ + however it is not yet synced" + ), + MergeReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement for the merge" + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if the Bellatrix fork has occurred or will occur within + /// `MERGE_READINESS_PREPARATION_SECONDS`. + pub fn is_time_to_prepare_for_bellatrix(&self, current_slot: Slot) -> bool { + if let Some(bellatrix_epoch) = self.spec.bellatrix_fork_epoch { + let bellatrix_slot = bellatrix_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let merge_readiness_preparation_slots = + MERGE_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + + // Return `true` if Bellatrix has happened or is within the preparation time. + current_slot + merge_readiness_preparation_slots > bellatrix_slot + } else { + // The Bellatrix fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for the merge. + pub async fn check_merge_readiness(&self) -> MergeReadiness { + if let Some(el) = self.execution_layer.as_ref() { + if let Err(e) = el.exchange_transition_configuration(&self.spec).await { + // The EL was either unreachable, responded with an error or has a different + // configuration. + return MergeReadiness::ExchangeTransitionConfigurationFailed { + error: format!("{:?}", e), + }; + } + + if !el.is_synced_for_notifier().await { + // The EL is not synced. 
+ return MergeReadiness::NotSynced; + } + let params = MergeConfig::from_chainspec(&self.spec); + let current_difficulty = el.get_current_difficulty().await.ok(); + MergeReadiness::Ready { + config: params, + current_difficulty, + } + } else { + // There is no EL configured. + MergeReadiness::NoExecutionEndpoint + } + } +} + +/// Utility function to serialize a Uint256 as a decimal string. +fn serialize_uint256(val: &Option, s: S) -> Result +where + S: Serializer, +{ + match val { + Some(v) => v.to_string().serialize(s), + None => s.serialize_none(), + } +} diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs new file mode 100644 index 0000000000..805b61dd9c --- /dev/null +++ b/beacon_node/beacon_chain/src/otb_verification_service.rs @@ -0,0 +1,378 @@ +use crate::execution_payload::{validate_merge_block, AllowOptimisticImport}; +use crate::{ + BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, +}; +use itertools::process_results; +use proto_array::InvalidationOperation; +use slog::{crit, debug, error, info, warn}; +use slot_clock::SlotClock; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use state_processing::per_block_processing::is_merge_transition_complete; +use std::sync::Arc; +use store::{DBColumn, Error as StoreError, HotColdDB, KeyValueStore, StoreItem}; +use task_executor::{ShutdownReason, TaskExecutor}; +use tokio::time::sleep; +use tree_hash::TreeHash; +use types::{BeaconBlockRef, EthSpec, Hash256, Slot}; +use DBColumn::OptimisticTransitionBlock as OTBColumn; + +#[derive(Clone, Debug, Decode, Encode, PartialEq)] +pub struct OptimisticTransitionBlock { + root: Hash256, + slot: Slot, +} + +impl OptimisticTransitionBlock { + // types::BeaconBlockRef<'_, ::EthSpec> + pub fn from_block(block: BeaconBlockRef) -> Self { + Self { + root: block.tree_hash_root(), + slot: 
block.slot(), + } + } + + pub fn root(&self) -> &Hash256 { + &self.root + } + + pub fn slot(&self) -> &Slot { + &self.slot + } + + pub fn persist_in_store(&self, store: A) -> Result<(), StoreError> + where + T: BeaconChainTypes, + A: AsRef>, + { + if store + .as_ref() + .item_exists::(&self.root)? + { + Ok(()) + } else { + store.as_ref().put_item(&self.root, self) + } + } + + pub fn remove_from_store(&self, store: A) -> Result<(), StoreError> + where + T: BeaconChainTypes, + A: AsRef>, + { + store + .as_ref() + .hot_db + .key_delete(OTBColumn.into(), self.root.as_bytes()) + } + + fn is_canonical( + &self, + chain: &BeaconChain, + ) -> Result { + Ok(chain + .forwards_iter_block_roots_until(self.slot, self.slot)? + .next() + .transpose()? + .map(|(root, _)| root) + == Some(self.root)) + } +} + +impl StoreItem for OptimisticTransitionBlock { + fn db_column() -> DBColumn { + OTBColumn + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } +} + +/// The routine is expected to run once per epoch, 1/4th through the epoch. +pub const EPOCH_DELAY_FACTOR: u32 = 4; + +/// Spawns a routine which checks the validity of any optimistically imported transition blocks +/// +/// This routine will run once per epoch, at `epoch_duration / EPOCH_DELAY_FACTOR` after +/// the start of each epoch. +/// +/// The service will not be started if there is no `execution_layer` on the `chain`. +pub fn start_otb_verification_service( + executor: TaskExecutor, + chain: Arc>, +) { + // Avoid spawning the service if there's no EL, it'll just error anyway. 
+ if chain.execution_layer.is_some() { + executor.spawn( + async move { otb_verification_service(chain).await }, + "otb_verification_service", + ); + } +} + +pub fn load_optimistic_transition_blocks( + chain: &BeaconChain, +) -> Result, StoreError> { + process_results(chain.store.hot_db.iter_column(OTBColumn), |iter| { + iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) + .collect() + })? +} + +#[derive(Debug)] +pub enum Error { + ForkChoice(String), + BeaconChain(BeaconChainError), + StoreError(StoreError), + NoBlockFound(OptimisticTransitionBlock), +} + +pub async fn validate_optimistic_transition_blocks( + chain: &Arc>, + otbs: Vec, +) -> Result<(), Error> { + let finalized_slot = chain + .canonical_head + .fork_choice_read_lock() + .get_finalized_block() + .map_err(|e| Error::ForkChoice(format!("{:?}", e)))? + .slot; + + // separate otbs into + // non-canonical + // finalized canonical + // unfinalized canonical + let mut non_canonical_otbs = vec![]; + let (finalized_canonical_otbs, unfinalized_canonical_otbs) = process_results( + otbs.into_iter().map(|otb| { + otb.is_canonical(chain) + .map(|is_canonical| (otb, is_canonical)) + }), + |pair_iter| { + pair_iter + .filter_map(|(otb, is_canonical)| { + if is_canonical { + Some(otb) + } else { + non_canonical_otbs.push(otb); + None + } + }) + .partition::, _>(|otb| *otb.slot() <= finalized_slot) + }, + ) + .map_err(Error::BeaconChain)?; + + // remove non-canonical blocks that conflict with finalized checkpoint from the database + for otb in non_canonical_otbs { + if *otb.slot() <= finalized_slot { + otb.remove_from_store::(&chain.store) + .map_err(Error::StoreError)?; + } + } + + // ensure finalized canonical otb are valid, otherwise kill client + for otb in finalized_canonical_otbs { + match chain.get_block(otb.root()).await { + Ok(Some(block)) => { + match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await + { + Ok(()) => { + // merge transition block is valid, 
remove it from OTB + otb.remove_from_store::(&chain.store) + .map_err(Error::StoreError)?; + info!( + chain.log, + "Validated merge transition block"; + "block_root" => ?otb.root(), + "type" => "finalized" + ); + } + // The block was not able to be verified by the EL. Leave the OTB in the + // database since the EL is likely still syncing and may verify the block + // later. + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::UnverifiedNonOptimisticCandidate, + )) => (), + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, + )) => { + // Finalized Merge Transition Block is Invalid! Kill the Client! + crit!( + chain.log, + "Finalized merge transition block is invalid!"; + "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ + You may be on a hostile network.", + "block_hash" => ?block.canonical_root() + ); + let mut shutdown_sender = chain.shutdown_sender(); + if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + )) { + crit!( + chain.log, + "Failed to shut down client"; + "error" => ?e, + "shutdown_reason" => INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON + ); + } + } + _ => {} + } + } + Ok(None) => return Err(Error::NoBlockFound(otb)), + // Our database has pruned the payload and the payload was unavailable on the EL since + // the EL is still syncing or the payload is non-canonical. 
+ Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), + Err(e) => return Err(Error::BeaconChain(e)), + } + } + + // attempt to validate any non-finalized canonical otb blocks + for otb in unfinalized_canonical_otbs { + match chain.get_block(otb.root()).await { + Ok(Some(block)) => { + match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await + { + Ok(()) => { + // merge transition block is valid, remove it from OTB + otb.remove_from_store::(&chain.store) + .map_err(Error::StoreError)?; + info!( + chain.log, + "Validated merge transition block"; + "block_root" => ?otb.root(), + "type" => "not finalized" + ); + } + // The block was not able to be verified by the EL. Leave the OTB in the + // database since the EL is likely still syncing and may verify the block + // later. + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::UnverifiedNonOptimisticCandidate, + )) => (), + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, + )) => { + // Unfinalized Merge Transition Block is Invalid -> Run process_invalid_execution_payload + warn!( + chain.log, + "Merge transition block invalid"; + "block_root" => ?otb.root() + ); + chain + .process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: *otb.root(), + }, + ) + .await + .map_err(|e| { + warn!( + chain.log, + "Error checking merge transition block"; + "error" => ?e, + "location" => "process_invalid_execution_payload" + ); + Error::BeaconChain(e) + })?; + } + _ => {} + } + } + Ok(None) => return Err(Error::NoBlockFound(otb)), + // Our database has pruned the payload and the payload was unavailable on the EL since + // the EL is still syncing or the payload is non-canonical. 
+ Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), + Err(e) => return Err(Error::BeaconChain(e)), + } + } + + Ok(()) +} + +/// Loop until any optimistically imported merge transition blocks have been verified and +/// the merge has been finalized. +async fn otb_verification_service(chain: Arc>) { + let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; + loop { + match chain + .slot_clock + .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) + { + Some(duration) => { + let additional_delay = epoch_duration / EPOCH_DELAY_FACTOR; + sleep(duration + additional_delay).await; + + debug!( + chain.log, + "OTB verification service firing"; + ); + + if !is_merge_transition_complete( + &chain.canonical_head.cached_head().snapshot.beacon_state, + ) { + // We are pre-merge. Nothing to do yet. + continue; + } + + // load all optimistically imported transition blocks from the database + match load_optimistic_transition_blocks(chain.as_ref()) { + Ok(otbs) => { + if otbs.is_empty() { + if chain + .canonical_head + .fork_choice_read_lock() + .get_finalized_block() + .map_or(false, |block| { + block.execution_status.is_execution_enabled() + }) + { + // there are no optimistic blocks in the database, we can exit + // the service since the merge transition is finalized and we'll + // never see another transition block + break; + } else { + debug!( + chain.log, + "No optimistic transition blocks"; + "info" => "waiting for the merge transition to finalize" + ) + } + } + if let Err(e) = validate_optimistic_transition_blocks(&chain, otbs).await { + warn!( + chain.log, + "Error while validating optimistic transition blocks"; + "error" => ?e + ); + } + } + Err(e) => { + error!( + chain.log, + "Error loading optimistic transition blocks"; + "error" => ?e + ); + } + }; + } + None => { + error!(chain.log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. 
+ sleep(chain.slot_clock.slot_duration()).await; + } + }; + } + debug!( + chain.log, + "No optimistic transition blocks in database"; + "msg" => "shutting down OTB verification service" + ); +} diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index eb4c761913..a60dacdc7c 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,5 +1,6 @@ use crate::beacon_fork_choice_store::{ - PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, + PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11, + PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, }; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -7,10 +8,10 @@ use store::{DBColumn, Error, StoreItem}; use superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. -pub type PersistedForkChoice = PersistedForkChoiceV8; +pub type PersistedForkChoice = PersistedForkChoiceV11; #[superstruct( - variants(V1, V7, V8), + variants(V1, V7, V8, V10, V11), variant_attributes(derive(Encode, Decode)), no_enum )] @@ -22,6 +23,10 @@ pub struct PersistedForkChoice { pub fork_choice_store: PersistedForkChoiceStoreV7, #[superstruct(only(V8))] pub fork_choice_store: PersistedForkChoiceStoreV8, + #[superstruct(only(V10))] + pub fork_choice_store: PersistedForkChoiceStoreV10, + #[superstruct(only(V11))] + pub fork_choice_store: PersistedForkChoiceStoreV11, } macro_rules! impl_store_item { @@ -45,3 +50,5 @@ macro_rules! 
impl_store_item { impl_store_item!(PersistedForkChoiceV1); impl_store_item!(PersistedForkChoiceV7); impl_store_item!(PersistedForkChoiceV8); +impl_store_item!(PersistedForkChoiceV10); +impl_store_item!(PersistedForkChoiceV11); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index bb1ae7c9a3..15b0f39f3a 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,4 +1,6 @@ //! Utilities for managing database schema changes. +mod migration_schema_v10; +mod migration_schema_v11; mod migration_schema_v12; mod migration_schema_v6; mod migration_schema_v7; @@ -7,7 +9,10 @@ mod migration_schema_v9; mod types; use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; -use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; +use crate::persisted_fork_choice::{ + PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7, + PersistedForkChoiceV8, +}; use crate::types::ChainSpec; use slog::{warn, Logger}; use std::path::Path; @@ -34,6 +39,12 @@ pub fn migrate_schema( migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; migrate_schema::(db, datadir, next, to, log, spec) } + // Downgrade across multiple versions by recursively migrating one step at a time. + (_, _) if to.as_u64() + 1 < from.as_u64() => { + let next = SchemaVersion(from.as_u64() - 1); + migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; + migrate_schema::(db, datadir, next, to, log, spec) + } // // Migrations from before SchemaVersion(5) are deprecated. 
@@ -131,14 +142,67 @@ pub fn migrate_schema( migration_schema_v9::downgrade_from_v9::(db.clone(), log)?; db.store_schema_version(to) } - // FIXME(sproul): stub for Sean's v10 migration - (SchemaVersion(9), SchemaVersion(10)) => db.store_schema_version(to), + (SchemaVersion(9), SchemaVersion(10)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?; + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + (SchemaVersion(10), SchemaVersion(9)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = migration_schema_v10::downgrade_fork_choice(fork_choice)?; + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + // Upgrade from v10 to v11 adding support for equivocating indices to fork choice. + (SchemaVersion(10), SchemaVersion(11)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = migration_schema_v11::update_fork_choice(fork_choice); + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + // Downgrade from v11 to v10 removing support for equivocating indices from fork choice. 
+ (SchemaVersion(11), SchemaVersion(10)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = + migration_schema_v11::downgrade_fork_choice(fork_choice, log); + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } // Upgrade from v11 to v12 to store richer metadata in the attestation op pool. - (SchemaVersion(10), SchemaVersion(12)) => { + (SchemaVersion(11), SchemaVersion(12)) => { let ops = migration_schema_v12::upgrade_to_v12::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } - // Downgrade from v12 to v9 to drop richer metadata from the attestation op pool. + // Downgrade from v12 to v11 to drop richer metadata from the attestation op pool. (SchemaVersion(12), SchemaVersion(11)) => { let ops = migration_schema_v12::downgrade_from_v12::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs new file mode 100644 index 0000000000..70e0007851 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs @@ -0,0 +1,97 @@ +use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV8}; +use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV8}; +use crate::schema_change::{ + types::{SszContainerV10, SszContainerV7}, + StoreError, +}; +use proto_array::core::SszContainer; +use ssz::{Decode, Encode}; + +pub fn update_fork_choice( + mut fork_choice: PersistedForkChoiceV8, +) -> Result { + let ssz_container_v7 = SszContainerV7::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + StoreError::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + 
)) + })?; + + // These transformations instantiate `node.unrealized_justified_checkpoint` and + // `node.unrealized_finalized_checkpoint` to `None`. + let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); + let ssz_container: SszContainer = ssz_container_v10.into(); + fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +pub fn downgrade_fork_choice( + mut fork_choice: PersistedForkChoiceV10, +) -> Result { + let ssz_container_v10 = SszContainerV10::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + StoreError::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + let ssz_container_v7: SszContainerV7 = ssz_container_v10.into(); + fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +impl From for PersistedForkChoiceStoreV10 { + fn from(other: PersistedForkChoiceStoreV8) -> Self { + Self { + balances_cache: other.balances_cache, + time: other.time, + finalized_checkpoint: other.finalized_checkpoint, + justified_checkpoint: other.justified_checkpoint, + justified_balances: other.justified_balances, + best_justified_checkpoint: other.best_justified_checkpoint, + unrealized_justified_checkpoint: other.best_justified_checkpoint, + unrealized_finalized_checkpoint: other.finalized_checkpoint, + proposer_boost_root: other.proposer_boost_root, + } + } +} + +impl From for PersistedForkChoiceV10 { + fn from(other: PersistedForkChoiceV8) -> Self { + Self { + fork_choice: other.fork_choice, + fork_choice_store: other.fork_choice_store.into(), + } + } +} + +impl From for PersistedForkChoiceStoreV8 { + fn from(other: PersistedForkChoiceStoreV10) -> Self { + Self { + balances_cache: other.balances_cache, + time: other.time, + finalized_checkpoint: other.finalized_checkpoint, + justified_checkpoint: other.justified_checkpoint, + justified_balances: 
other.justified_balances, + best_justified_checkpoint: other.best_justified_checkpoint, + proposer_boost_root: other.proposer_boost_root, + } + } +} + +impl From for PersistedForkChoiceV8 { + fn from(other: PersistedForkChoiceV10) -> Self { + Self { + fork_choice: other.fork_choice, + fork_choice_store: other.fork_choice_store.into(), + } + } +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs new file mode 100644 index 0000000000..dde80a5cac --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs @@ -0,0 +1,77 @@ +use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11}; +use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV11}; +use slog::{warn, Logger}; +use std::collections::BTreeSet; + +/// Add the equivocating indices field. +pub fn update_fork_choice(fork_choice_v10: PersistedForkChoiceV10) -> PersistedForkChoiceV11 { + let PersistedForkChoiceStoreV10 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + } = fork_choice_v10.fork_choice_store; + + PersistedForkChoiceV11 { + fork_choice: fork_choice_v10.fork_choice, + fork_choice_store: PersistedForkChoiceStoreV11 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + equivocating_indices: BTreeSet::new(), + }, + } +} + +pub fn downgrade_fork_choice( + fork_choice_v11: PersistedForkChoiceV11, + log: Logger, +) -> PersistedForkChoiceV10 { + let PersistedForkChoiceStoreV11 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + 
best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + equivocating_indices, + } = fork_choice_v11.fork_choice_store; + + if !equivocating_indices.is_empty() { + warn!( + log, + "Deleting slashed validators from fork choice store"; + "count" => equivocating_indices.len(), + "message" => "this may make your node more susceptible to following the wrong chain", + ); + } + + PersistedForkChoiceV10 { + fork_choice: fork_choice_v11.fork_choice, + fork_choice_store: PersistedForkChoiceStoreV10 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + }, + } +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs index 9222266ba9..81147b8af6 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -2,7 +2,7 @@ use crate::beacon_chain::BeaconChainTypes; use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; -use crate::schema_change::types::{ProtoNodeV6, SszContainerV6, SszContainerV7}; +use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7}; use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::ForkChoice; @@ -86,7 +86,8 @@ pub(crate) fn update_fork_choice( // to `None`. 
let ssz_container_v7: SszContainerV7 = ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint); - let ssz_container: SszContainer = ssz_container_v7.into(); + let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); + let ssz_container: SszContainer = ssz_container_v10.into(); let mut fork_choice: ProtoArrayForkChoice = ssz_container.into(); update_checkpoints::(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db) @@ -97,6 +98,13 @@ pub(crate) fn update_fork_choice( update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice) .map_err(StoreError::SchemaMigrationError)?; + // Need to downgrade the SSZ container to V7 so that all migrations can be applied in sequence. + let ssz_container = SszContainer::from(&fork_choice); + let ssz_container_v7 = SszContainerV7::from(ssz_container); + + persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); + persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; + Ok(()) } @@ -301,8 +309,6 @@ fn update_store_justified_checkpoint( .ok_or("Proto node with current finalized checkpoint not found")?; fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint; - persisted_fork_choice.fork_choice.proto_array_bytes = fork_choice.as_bytes(); - persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; Ok(()) } diff --git a/beacon_node/beacon_chain/src/schema_change/types.rs b/beacon_node/beacon_chain/src/schema_change/types.rs index 8d41a384f6..02a54c1a3f 100644 --- a/beacon_node/beacon_chain/src/schema_change/types.rs +++ b/beacon_node/beacon_chain/src/schema_change/types.rs @@ -12,7 +12,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); #[superstruct( - variants(V1, V6, V7), + variants(V1, V6, V7, V10), variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)), no_enum )] @@ -30,18 +30,24 @@ 
pub struct ProtoNode { #[superstruct(only(V1, V6))] pub finalized_epoch: Epoch, #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub justified_checkpoint: Option, #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub finalized_checkpoint: Option, pub weight: u64, #[ssz(with = "four_byte_option_usize")] pub best_child: Option, #[ssz(with = "four_byte_option_usize")] pub best_descendant: Option, - #[superstruct(only(V6, V7))] + #[superstruct(only(V6, V7, V10))] pub execution_status: ExecutionStatus, + #[ssz(with = "four_byte_option_checkpoint")] + #[superstruct(only(V10))] + pub unrealized_justified_checkpoint: Option, + #[ssz(with = "four_byte_option_checkpoint")] + #[superstruct(only(V10))] + pub unrealized_finalized_checkpoint: Option, } impl Into for ProtoNodeV1 { @@ -88,9 +94,31 @@ impl Into for ProtoNodeV6 { } } -impl Into for ProtoNodeV7 { - fn into(self) -> ProtoNode { - ProtoNode { +impl Into for ProtoNodeV7 { + fn into(self) -> ProtoNodeV10 { + ProtoNodeV10 { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, + } + } +} + +impl Into for ProtoNodeV10 { + fn into(self) -> ProtoNodeV7 { + ProtoNodeV7 { slot: self.slot, state_root: self.state_root, target_root: self.target_root, @@ -108,8 +136,50 @@ impl Into for ProtoNodeV7 { } } +impl Into for ProtoNodeV10 { + fn into(self) -> ProtoNode { + ProtoNode { + slot: self.slot, + state_root: 
self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + } + } +} + +impl From for ProtoNodeV7 { + fn from(container: ProtoNode) -> Self { + Self { + slot: container.slot, + state_root: container.state_root, + target_root: container.target_root, + current_epoch_shuffling_id: container.current_epoch_shuffling_id, + next_epoch_shuffling_id: container.next_epoch_shuffling_id, + root: container.root, + parent: container.parent, + justified_checkpoint: container.justified_checkpoint, + finalized_checkpoint: container.finalized_checkpoint, + weight: container.weight, + best_child: container.best_child, + best_descendant: container.best_descendant, + execution_status: container.execution_status, + } + } +} + #[superstruct( - variants(V1, V6, V7), + variants(V1, V6, V7, V10), variant_attributes(derive(Encode, Decode)), no_enum )] @@ -122,9 +192,9 @@ pub struct SszContainer { pub justified_epoch: Epoch, #[superstruct(only(V1, V6))] pub finalized_epoch: Epoch, - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub justified_checkpoint: Checkpoint, - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub finalized_checkpoint: Checkpoint, #[superstruct(only(V1))] pub nodes: Vec, @@ -132,8 +202,10 @@ pub struct SszContainer { pub nodes: Vec, #[superstruct(only(V7))] pub nodes: Vec, + #[superstruct(only(V10))] + pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub 
previous_proposer_boost: ProposerBoost, } @@ -174,7 +246,41 @@ impl SszContainerV6 { } } -impl Into for SszContainerV7 { +impl Into for SszContainerV7 { + fn into(self) -> SszContainerV10 { + let nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV10 { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + } + } +} + +impl Into for SszContainerV10 { + fn into(self) -> SszContainerV7 { + let nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV7 { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + } + } +} + +impl Into for SszContainerV10 { fn into(self) -> SszContainer { let nodes = self.nodes.into_iter().map(Into::into).collect(); @@ -190,3 +296,20 @@ impl Into for SszContainerV7 { } } } + +impl From for SszContainerV7 { + fn from(container: SszContainer) -> Self { + let nodes = container.nodes.into_iter().map(Into::into).collect(); + + Self { + votes: container.votes, + balances: container.balances, + prune_threshold: container.prune_threshold, + justified_checkpoint: container.justified_checkpoint, + finalized_checkpoint: container.finalized_checkpoint, + nodes, + indices: container.indices, + previous_proposer_boost: container.previous_proposer_boost, + } + } +} diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 5abec98877..48c0f2f8a2 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -220,14 +220,7 @@ async fn 
state_advance_timer( return; } - if let Err(e) = beacon_chain.recompute_head_at_slot(next_slot).await { - warn!( - log, - "Error updating fork choice for next slot"; - "error" => ?e, - "slot" => next_slot, - ); - } + beacon_chain.recompute_head_at_slot(next_slot).await; // Use a blocking task to avoid blocking the core executor whilst waiting for locks // in `ForkChoiceSignalTx`. diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 579bd3194b..8ec196eb1d 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -11,10 +11,15 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; +use execution_layer::test_utils::DEFAULT_JWT_SECRET; use execution_layer::{ - test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK}, + auth::JwtKey, + test_utils::{ + ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK, + }, ExecutionLayer, }; +use fork_choice::CountUnrealized; use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; @@ -28,12 +33,14 @@ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slog::Logger; use slot_clock::TestingSlotClock; +use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::{ state_advance::{complete_state_advance, partial_state_advance}, StateRootStrategy, }; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::fmt; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -149,6 +156,7 @@ pub struct Builder { store_mutator: Option>, execution_layer: Option>, mock_execution_layer: Option>, + mock_builder: Option>, runtime: TestRuntime, log: Logger, } @@ -206,6 +214,20 @@ impl Builder> { self.store = Some(store); self.store_mutator(Box::new(mutator)) } + + /// Manually restore from a given `MemoryStore`. 
+ pub fn resumed_ephemeral_store( + mut self, + store: Arc, MemoryStore>>, + ) -> Self { + let mutator = move |builder: BeaconChainBuilder<_>| { + builder + .resume_from_db() + .expect("should resume from database") + }; + self.store = Some(store); + self.store_mutator(Box::new(mutator)) + } } impl Builder> { @@ -266,6 +288,7 @@ where store_mutator: None, execution_layer: None, mock_execution_layer: None, + mock_builder: None, runtime, log, } @@ -361,6 +384,7 @@ where DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, spec.terminal_block_hash_activation_epoch, + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), None, ); self.execution_layer = Some(mock.el.clone()); @@ -368,6 +392,38 @@ where self } + pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self { + // Get a random unused port + let port = unused_port::unused_tcp_port().unwrap(); + let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); + + let spec = self.spec.clone().expect("cannot build without spec"); + let mock_el = MockExecutionLayer::new( + self.runtime.task_executor.clone(), + spec.terminal_total_difficulty, + DEFAULT_TERMINAL_BLOCK, + spec.terminal_block_hash, + spec.terminal_block_hash_activation_epoch, + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + Some(builder_url.clone()), + ) + .move_to_terminal_block(); + + let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); + + self.mock_builder = Some(TestingBuilder::new( + mock_el_url, + builder_url, + beacon_url, + spec, + self.runtime.task_executor.clone(), + )); + self.execution_layer = Some(mock_el.el.clone()); + self.mock_execution_layer = Some(mock_el); + + self + } + /// Instruct the mock execution engine to always return a "valid" response to any payload it is /// asked to execute. 
pub fn mock_execution_layer_all_payloads_valid(self) -> Self { @@ -436,6 +492,7 @@ where shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, + mock_builder: self.mock_builder.map(Arc::new), rng: make_rng(), } } @@ -454,6 +511,7 @@ pub struct BeaconChainHarness { pub runtime: TestRuntime, pub mock_execution_layer: Option>, + pub mock_builder: Option>>, pub rng: Mutex, } @@ -518,6 +576,11 @@ where self.chain.head_beacon_state_cloned() } + pub fn get_timestamp_at_slot(&self) -> u64 { + let state = self.get_current_state(); + compute_timestamp_at_slot(&state, &self.spec).unwrap() + } + pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); @@ -1360,9 +1423,12 @@ where block: SignedBeaconBlock, ) -> Result> { self.set_current_slot(slot); - let block_hash: SignedBeaconBlockHash = - self.chain.process_block(Arc::new(block)).await?.into(); - self.chain.recompute_head_at_current_slot().await?; + let block_hash: SignedBeaconBlockHash = self + .chain + .process_block(Arc::new(block), CountUnrealized::True) + .await? + .into(); + self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -1370,9 +1436,12 @@ where &self, block: SignedBeaconBlock, ) -> Result> { - let block_hash: SignedBeaconBlockHash = - self.chain.process_block(Arc::new(block)).await?.into(); - self.chain.recompute_head_at_current_slot().await?; + let block_hash: SignedBeaconBlockHash = self + .chain + .process_block(Arc::new(block), CountUnrealized::True) + .await? + .into(); + self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -1769,3 +1838,10 @@ where (honest_head, faulty_head) } } + +// Junk `Debug` impl to satisfy certain trait bounds during testing.
+impl fmt::Debug for BeaconChainHarness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "BeaconChainHarness") + } +} diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 4b3e1e72fe..88d6914036 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -4,6 +4,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult}; +use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; use slasher::{Config as SlasherConfig, Slasher}; @@ -147,23 +148,19 @@ async fn chain_segment_full_segment() { // Sneak in a little check to ensure we can process empty chain segments. harness .chain - .process_chain_segment(vec![]) + .process_chain_segment(vec![], CountUnrealized::True) .await .into_block_error() .expect("should import empty chain segment"); harness .chain - .process_chain_segment(blocks.clone()) + .process_chain_segment(blocks.clone(), CountUnrealized::True) .await .into_block_error() .expect("should import chain segment"); - harness - .chain - .recompute_head_at_current_slot() - .await - .expect("should run fork choice"); + harness.chain.recompute_head_at_current_slot().await; assert_eq!( harness.head_block_root(), @@ -187,17 +184,13 @@ async fn chain_segment_varying_chunk_size() { for chunk in blocks.chunks(*chunk_size) { harness .chain - .process_chain_segment(chunk.to_vec()) + .process_chain_segment(chunk.to_vec(), CountUnrealized::True) .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); } - harness - .chain - .recompute_head_at_current_slot() - .await - .expect("should run fork choice"); + harness.chain.recompute_head_at_current_slot().await; assert_eq!( harness.head_block_root(), @@ -227,7 
+220,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -247,7 +240,7 @@ async fn chain_segment_non_linear_parent_roots() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::NonLinearParentRoots) @@ -278,7 +271,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -299,7 +292,7 @@ async fn chain_segment_non_linear_slots() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::NonLinearSlots) @@ -325,7 +318,7 @@ async fn assert_invalid_signature( matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -342,12 +335,18 @@ async fn assert_invalid_signature( .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been // imported prior to this test. 
- let _ = harness.chain.process_chain_segment(ancestor_blocks).await; + let _ = harness + .chain + .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .await; assert!( matches!( harness .chain - .process_block(snapshots[block_index].beacon_block.clone()) + .process_block( + snapshots[block_index].beacon_block.clone(), + CountUnrealized::True + ) .await, Err(BlockError::InvalidSignature) ), @@ -397,7 +396,7 @@ async fn invalid_signature_gossip_block() { .collect(); harness .chain - .process_chain_segment(ancestor_blocks) + .process_chain_segment(ancestor_blocks, CountUnrealized::True) .await .into_block_error() .expect("should import all blocks prior to the one being tested"); @@ -405,10 +404,10 @@ async fn invalid_signature_gossip_block() { matches!( harness .chain - .process_block(Arc::new(SignedBeaconBlock::from_block( - block, - junk_signature() - ))) + .process_block( + Arc::new(SignedBeaconBlock::from_block(block, junk_signature())), + CountUnrealized::True + ) .await, Err(BlockError::InvalidSignature) ), @@ -441,7 +440,7 @@ async fn invalid_signature_block_proposal() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -639,7 +638,7 @@ async fn invalid_signature_deposit() { !matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) .await .into_block_error(), Err(BlockError::InvalidSignature) @@ -716,11 +715,14 @@ async fn block_gossip_verification() { harness .chain - .process_block(gossip_verified) + .process_block(gossip_verified, CountUnrealized::True) .await .expect("should import valid gossip verified block"); } + // Recompute the head to ensure we cache the latest view of fork choice. 
+ harness.chain.recompute_head_at_current_slot().await; + /* * This test ensures that: * @@ -978,7 +980,11 @@ async fn verify_block_for_gossip_slashing_detection() { .verify_block_for_gossip(Arc::new(block1)) .await .unwrap(); - harness.chain.process_block(verified_block).await.unwrap(); + harness + .chain + .process_block(verified_block, CountUnrealized::True) + .await + .unwrap(); unwrap_err( harness .chain @@ -1009,7 +1015,11 @@ async fn verify_block_for_gossip_doppelganger_detection() { .await .unwrap(); let attestations = verified_block.block.message().body().attestations().clone(); - harness.chain.process_block(verified_block).await.unwrap(); + harness + .chain + .process_block(verified_block, CountUnrealized::True) + .await + .unwrap(); for att in attestations.iter() { let epoch = att.data.target.epoch; @@ -1148,7 +1158,7 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_block(Arc::new(base_block.clone())) + .process_block(Arc::new(base_block.clone()), CountUnrealized::True) .await .err() .expect("should error when processing base block"), @@ -1162,7 +1172,7 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(base_block)]) + .process_chain_segment(vec![Arc::new(base_block)], CountUnrealized::True) .await, ChainSegmentResult::Failed { imported_blocks: 0, @@ -1276,7 +1286,7 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_block(Arc::new(altair_block.clone())) + .process_block(Arc::new(altair_block.clone()), CountUnrealized::True) .await .err() .expect("should error when processing altair block"), @@ -1290,7 +1300,7 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_chain_segment(vec![Arc::new(altair_block)]) + .process_chain_segment(vec![Arc::new(altair_block)], CountUnrealized::True) .await, ChainSegmentResult::Failed { imported_blocks: 0, diff --git 
a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index 91d5eb21ca..19e8902a3e 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -1,7 +1,7 @@ #![cfg(not(debug_assertions))] // Tests run too slow in debug. use beacon_chain::test_utils::BeaconChainHarness; -use execution_layer::test_utils::{generate_pow_block, DEFAULT_TERMINAL_BLOCK}; +use execution_layer::test_utils::{generate_pow_block, Block, DEFAULT_TERMINAL_BLOCK}; use types::*; const VALIDATOR_COUNT: usize = 32; @@ -22,6 +22,7 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { prev_ep.execution_payload.block_number + 1, ep.execution_payload.block_number ); + assert!(ep.execution_payload.timestamp > prev_ep.execution_payload.timestamp); } prev_ep = Some(ep.clone()); } @@ -169,6 +170,30 @@ async fn base_altair_merge_with_terminal_block_after_fork() { .move_to_terminal_block() .unwrap(); + // Add a slot duration to get to the next slot + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + + harness.extend_slots(1).await; + + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert_eq!( + *one_after_merge_head + .message() + .body() + .execution_payload() + .unwrap(), + FullPayload::default() + ); + assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2); + /* * Next merge block should include an exec payload. 
*/ diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index e37ed286bc..5e03ef2335 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,17 +1,27 @@ #![cfg(not(debug_assertions))] +use beacon_chain::otb_verification_service::{ + load_optimistic_transition_blocks, validate_optimistic_transition_blocks, + OptimisticTransitionBlock, +}; use beacon_chain::{ + canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, + test_utils::ExecutionBlockGenerator, ExecutionLayer, ForkChoiceState, PayloadAttributes, }; -use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; +use fork_choice::{ + CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, +}; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; +use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use task_executor::ShutdownReason; @@ -40,7 +50,11 @@ struct InvalidPayloadRig { impl InvalidPayloadRig { fn new() -> Self { - let mut spec = E::default_spec(); + let spec = E::default_spec(); + Self::new_with_spec(spec) + } + + fn new_with_spec(mut spec: ChainSpec) -> Self { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); @@ -93,11 +107,15 @@ impl InvalidPayloadRig { } async fn recompute_head(&self) { - self.harness - .chain - .recompute_head_at_current_slot() - .await - .unwrap(); + self.harness.chain.recompute_head_at_current_slot().await; + } + + fn cached_head(&self) 
-> CachedHead { + self.harness.chain.canonical_head.cached_head() + } + + fn canonical_head(&self) -> &CanonicalHead> { + &self.harness.chain.canonical_head } fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { @@ -273,7 +291,7 @@ impl InvalidPayloadRig { let execution_status = self.execution_status(root.into()); match forkchoice_response { - Payload::Syncing => assert!(execution_status.is_optimistic()), + Payload::Syncing => assert!(execution_status.is_strictly_optimistic()), Payload::Valid => assert!(execution_status.is_valid_and_post_bellatrix()), Payload::Invalid { .. } | Payload::InvalidBlockHash @@ -352,6 +370,19 @@ impl InvalidPayloadRig { .await .unwrap(); } + + fn assert_get_head_error_contains(&self, s: &str) { + match self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .get_head(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) + { + Err(ForkChoiceError::ProtoArrayError(e)) if e.contains(s) => (), + other => panic!("expected {} error, got {:?}", s, other), + }; + } } /// Simple test of the different import types. @@ -390,7 +421,7 @@ async fn invalid_payload_invalidates_parent() { }) .await; - assert!(rig.execution_status(roots[0]).is_valid_and_post_bellatrix()); + assert!(rig.execution_status(roots[0]).is_strictly_optimistic()); assert!(rig.execution_status(roots[1]).is_invalid()); assert!(rig.execution_status(roots[2]).is_invalid()); @@ -524,7 +555,7 @@ async fn pre_finalized_latest_valid_hash() { if slot == 1 { assert!(rig.execution_status(root).is_valid_and_post_bellatrix()); } else { - assert!(rig.execution_status(root).is_optimistic()); + assert!(rig.execution_status(root).is_strictly_optimistic()); } } } @@ -532,9 +563,9 @@ async fn pre_finalized_latest_valid_hash() { /// Ensure that a `latest_valid_hash` will: /// /// - Invalidate descendants of `latest_valid_root`. -/// - Validate `latest_valid_root` and its ancestors. 
+/// - Will not validate `latest_valid_root` and its ancestors. #[tokio::test] -async fn latest_valid_hash_will_validate() { +async fn latest_valid_hash_will_not_validate() { const LATEST_VALID_SLOT: u64 = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -571,8 +602,10 @@ async fn latest_valid_hash_will_validate() { assert!(execution_status.is_invalid()) } else if slot == 0 { assert!(execution_status.is_irrelevant()) - } else { + } else if slot == 1 { assert!(execution_status.is_valid_and_post_bellatrix()) + } else { + assert!(execution_status.is_strictly_optimistic()) } } } @@ -613,7 +646,7 @@ async fn latest_valid_hash_is_junk() { if slot == 1 { assert!(rig.execution_status(root).is_valid_and_post_bellatrix()); } else { - assert!(rig.execution_status(root).is_optimistic()); + assert!(rig.execution_status(root).is_strictly_optimistic()); } } } @@ -646,7 +679,7 @@ async fn invalidates_all_descendants() { let fork_block_root = rig .harness .chain - .process_block(Arc::new(fork_block)) + .process_block(Arc::new(fork_block), CountUnrealized::True) .await .unwrap(); rig.recompute_head().await; @@ -693,9 +726,15 @@ async fn invalidates_all_descendants() { } let execution_status = rig.execution_status(root); - if slot <= latest_valid_slot { - // Blocks prior to the latest valid hash are valid. + if slot == 0 { + // Genesis block is pre-bellatrix. + assert!(execution_status.is_irrelevant()); + } else if slot == 1 { + // First slot was imported as valid. assert!(execution_status.is_valid_and_post_bellatrix()); + } else if slot <= latest_valid_slot { + // Blocks prior to and included the latest valid hash are not marked as valid. + assert!(execution_status.is_strictly_optimistic()); } else { // Blocks after the latest valid hash are invalid. 
assert!(execution_status.is_invalid()); @@ -732,7 +771,7 @@ async fn switches_heads() { let fork_block_root = rig .harness .chain - .process_block(Arc::new(fork_block)) + .process_block(Arc::new(fork_block), CountUnrealized::True) .await .unwrap(); rig.recompute_head().await; @@ -752,7 +791,9 @@ async fn switches_heads() { assert_eq!(rig.harness.head_block_root(), fork_block_root); // The fork block has not yet been validated. - assert!(rig.execution_status(fork_block_root).is_optimistic()); + assert!(rig + .execution_status(fork_block_root) + .is_strictly_optimistic()); for root in blocks { let slot = rig @@ -769,9 +810,15 @@ async fn switches_heads() { } let execution_status = rig.execution_status(root); - if slot <= latest_valid_slot { - // Blocks prior to the latest valid hash are valid. + if slot == 0 { + // Genesis block is pre-bellatrix. + assert!(execution_status.is_irrelevant()); + } else if slot == 1 { + // First slot was imported as valid. assert!(execution_status.is_valid_and_post_bellatrix()); + } else if slot <= latest_valid_slot { + // Blocks prior to and included the latest valid hash are not marked as valid. + assert!(execution_status.is_strictly_optimistic()); } else { // Blocks after the latest valid hash are invalid. 
assert!(execution_status.is_invalid()); @@ -854,8 +901,8 @@ async fn manually_validate_child() { let parent = rig.import_block(Payload::Syncing).await; let child = rig.import_block(Payload::Syncing).await; - assert!(rig.execution_status(parent).is_optimistic()); - assert!(rig.execution_status(child).is_optimistic()); + assert!(rig.execution_status(parent).is_strictly_optimistic()); + assert!(rig.execution_status(child).is_strictly_optimistic()); rig.validate_manually(child); @@ -872,13 +919,13 @@ async fn manually_validate_parent() { let parent = rig.import_block(Payload::Syncing).await; let child = rig.import_block(Payload::Syncing).await; - assert!(rig.execution_status(parent).is_optimistic()); - assert!(rig.execution_status(child).is_optimistic()); + assert!(rig.execution_status(parent).is_strictly_optimistic()); + assert!(rig.execution_status(child).is_strictly_optimistic()); rig.validate_manually(parent); assert!(rig.execution_status(parent).is_valid_and_post_bellatrix()); - assert!(rig.execution_status(child).is_optimistic()); + assert!(rig.execution_status(child).is_strictly_optimistic()); } #[tokio::test] @@ -970,7 +1017,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for import. 
assert!(matches!( - rig.harness.chain.process_block(block.clone()).await, + rig.harness.chain.process_block(block.clone(), CountUnrealized::True).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); @@ -984,7 +1031,8 @@ async fn invalid_parent() { Duration::from_secs(0), &state, PayloadVerificationStatus::Optimistic, - &rig.harness.chain.spec + &rig.harness.chain.spec, + CountUnrealized::True, ), Err(ForkChoiceError::ProtoArrayError(message)) if message.contains(&format!( @@ -1003,6 +1051,11 @@ async fn payload_preparation_before_transition_block() { let rig = InvalidPayloadRig::new(); let el = rig.execution_layer(); + // Run the watchdog routine so that the status of the execution engine is set. This ensures + // that we don't end up with `eth_syncing` requests later in this function that will impede + // testing. + el.watchdog_task().await; + let head = rig.harness.chain.head_snapshot(); assert_eq!( head.beacon_block @@ -1073,7 +1126,7 @@ async fn attesting_to_optimistic_head() { "the head should be the latest imported block" ); assert!( - rig.execution_status(root).is_optimistic(), + rig.execution_status(root).is_strictly_optimistic(), "the head should be optimistic" ); @@ -1161,3 +1214,778 @@ async fn attesting_to_optimistic_head() { get_aggregated().unwrap(); get_aggregated_by_slot_and_root().unwrap(); } + +/// A helper struct to build out a chain of some configurable length which undergoes the merge +/// transition. 
+struct OptimisticTransitionSetup {
+    blocks: Vec<Arc<SignedBeaconBlock<E>>>,
+    execution_block_generator: ExecutionBlockGenerator<E>,
+}
+
+impl OptimisticTransitionSetup {
+    async fn new(num_blocks: usize, ttd: u64) -> Self {
+        let mut spec = E::default_spec();
+        spec.terminal_total_difficulty = ttd.into();
+        let mut rig = InvalidPayloadRig::new_with_spec(spec).enable_attestations();
+        rig.move_to_terminal_block();
+
+        let mut blocks = Vec::with_capacity(num_blocks);
+        for _ in 0..num_blocks {
+            let root = rig.import_block(Payload::Valid).await;
+            let block = rig.harness.chain.get_block(&root).await.unwrap().unwrap();
+            blocks.push(Arc::new(block));
+        }
+
+        let execution_block_generator = rig
+            .harness
+            .mock_execution_layer
+            .as_ref()
+            .unwrap()
+            .server
+            .execution_block_generator()
+            .clone();
+
+        Self {
+            blocks,
+            execution_block_generator,
+        }
+    }
+}
+
+/// Build a chain which has optimistically imported a transition block.
+///
+/// The initial chain will be built with respect to `block_ttd`, whilst the `rig` which imports the
+/// chain will operate with respect to `rig_ttd`. This allows for testing mismatched TTDs.
+async fn build_optimistic_chain(
+    block_ttd: u64,
+    rig_ttd: u64,
+    num_blocks: usize,
+) -> InvalidPayloadRig {
+    let OptimisticTransitionSetup {
+        blocks,
+        execution_block_generator,
+    } = OptimisticTransitionSetup::new(num_blocks, block_ttd).await;
+    // Build a brand-new testing harness. We will apply the blocks from the previous harness to
+    // this one.
+    let mut spec = E::default_spec();
+    spec.terminal_total_difficulty = rig_ttd.into();
+    let rig = InvalidPayloadRig::new_with_spec(spec);
+
+    let spec = &rig.harness.chain.spec;
+    let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap();
+
+    // Ensure all the execution blocks from the first rig are available in the second rig.
+ *mock_execution_layer.server.execution_block_generator() = execution_block_generator; + + // Make the execution layer respond `SYNCING` to all `newPayload` requests. + mock_execution_layer + .server + .all_payloads_syncing_on_new_payload(true); + // Make the execution layer respond `SYNCING` to all `forkchoiceUpdated` requests. + mock_execution_layer + .server + .all_payloads_syncing_on_forkchoice_updated(); + // Make the execution layer respond `None` to all `getBlockByHash` requests. + mock_execution_layer + .server + .all_get_block_by_hash_requests_return_none(); + + let current_slot = std::cmp::max( + blocks[0].slot() + spec.safe_slots_to_import_optimistically, + num_blocks.into(), + ); + rig.harness.set_current_slot(current_slot); + + for block in blocks { + rig.harness + .chain + .process_block(block, CountUnrealized::True) + .await + .unwrap(); + } + + rig.harness.chain.recompute_head_at_current_slot().await; + + // Make the execution layer respond normally to `getBlockByHash` requests. + mock_execution_layer + .server + .all_get_block_by_hash_requests_return_natural_value(); + + // Perform some sanity checks to ensure that the transition happened exactly where we expected. 
+ let pre_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(0), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let pre_transition_block = rig + .harness + .chain + .get_block(&pre_transition_block_root) + .await + .unwrap() + .unwrap(); + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + assert_eq!( + pre_transition_block_root, + post_transition_block.parent_root(), + "the blocks form a single chain" + ); + assert!( + pre_transition_block + .message() + .body() + .execution_payload() + .unwrap() + .execution_payload + == <_>::default(), + "the block *has not* undergone the merge transition" + ); + assert!( + post_transition_block + .message() + .body() + .execution_payload() + .unwrap() + .execution_payload + != <_>::default(), + "the block *has* undergone the merge transition" + ); + + // Assert that the transition block was optimistically imported. + // + // Note: we're using the "fallback" check for optimistic status, so if the block was + // pre-finality then we'll just use the optimistic status of the finalized block. + assert!( + rig.harness + .chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&post_transition_block_root) + .unwrap(), + "the transition block should be imported optimistically" + ); + + // Get the mock execution layer to respond to `getBlockByHash` requests normally again. 
+ mock_execution_layer + .server + .all_get_block_by_hash_requests_return_natural_value(); + + return rig; +} + +#[tokio::test] +async fn optimistic_transition_block_valid_unfinalized() { + let ttd = 42; + let num_blocks = 16 as usize; + let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + < post_transition_block.slot(), + "the transition block should not be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + valid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .expect("should validate fine"); + // now that the transition block has been validated, it should have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert!( + otbs.is_empty(), + "The valid optimistic transition block should have been removed from the database", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_valid_finalized() { + let ttd = 42; + let num_blocks = 130 as usize; + let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + 
.unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + > post_transition_block.slot(), + "the transition block should be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + valid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .expect("should validate fine"); + // now that the transition block has been validated, it should have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert!( + otbs.is_empty(), + "The valid optimistic transition block should have been removed from the database", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_invalid_unfinalized() { + let block_ttd = 42; + let rig_ttd = 1337; + let num_blocks = 22 as usize; + let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + < post_transition_block.slot(), + "the transition block should not be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + 
.expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + + let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + // No shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It shouldn't be known as invalid yet + assert!(!rig + .execution_status(post_transition_block_root) + .is_invalid()); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .unwrap(); + + // Still no shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It should be marked invalid now + assert!(rig + .execution_status(post_transition_block_root) + .is_invalid()); + + // the invalid merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The invalid merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { + let block_ttd = 42; + let rig_ttd = 1337; + let num_blocks = 22 as usize; + let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + < 
post_transition_block.slot(), + "the transition block should not be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + + let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + // No shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It shouldn't be known as invalid yet + assert!(!rig + .execution_status(post_transition_block_root) + .is_invalid()); + + // Make the execution layer respond `None` to all `getBlockByHash` requests to simulate a + // syncing EE. + let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); + mock_execution_layer + .server + .all_get_block_by_hash_requests_return_none(); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .unwrap(); + + // Still no shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + + // It should still be marked as optimistic. + assert!(rig + .execution_status(post_transition_block_root) + .is_strictly_optimistic()); + + // the optimistic merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The optimistic merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + // Allow the EL to respond to `getBlockByHash`, as if it has finished syncing. 
+ mock_execution_layer + .server + .all_get_block_by_hash_requests_return_natural_value(); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .unwrap(); + + // Still no shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It should be marked invalid now + assert!(rig + .execution_status(post_transition_block_root) + .is_invalid()); + + // the invalid merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The invalid merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_invalid_finalized() { + let block_ttd = 42; + let rig_ttd = 1337; + let num_blocks = 130 as usize; + let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + > post_transition_block.slot(), + "the transition block should be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + + let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); 
+
+    // No shutdown should've been triggered yet.
+    assert_eq!(rig.harness.shutdown_reasons(), vec![]);
+
+    validate_optimistic_transition_blocks(&rig.harness.chain, otbs)
+        .await
+        .expect("should invalidate merge transition block and shutdown the client");
+
+    // The beacon chain should have triggered a shutdown.
+    assert_eq!(
+        rig.harness.shutdown_reasons(),
+        vec![ShutdownReason::Failure(
+            INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON
+        )]
+    );
+
+    // the invalid merge transition block should NOT have been removed from the database
+    let otbs = load_optimistic_transition_blocks(&rig.harness.chain)
+        .expect("should load optimistic transition block from db");
+    assert_eq!(
+        otbs.len(),
+        1,
+        "The invalid merge transition block should still be in the database",
+    );
+    assert_eq!(
+        invalid_otb, otbs[0],
+        "The optimistic transition block stored in the database should be what we expect",
+    );
+}
+
+/// Helper for running tests where we generate a chain with an invalid head and then some
+/// `fork_blocks` to recover it.
+struct InvalidHeadSetup {
+    rig: InvalidPayloadRig,
+    fork_blocks: Vec<Arc<SignedBeaconBlock<E>>>,
+    invalid_head: CachedHead<E>,
+}
+
+impl InvalidHeadSetup {
+    async fn new() -> InvalidHeadSetup {
+        let mut rig = InvalidPayloadRig::new().enable_attestations();
+        rig.move_to_terminal_block();
+        rig.import_block(Payload::Valid).await; // Import a valid transition block.
+
+        // Import blocks until the first time the chain finalizes.
+        while rig.cached_head().finalized_checkpoint().epoch == 0 {
+            rig.import_block(Payload::Syncing).await;
+        }
+
+        let invalid_head = rig.cached_head();
+
+        // Invalidate the head block.
+        rig.invalidate_manually(invalid_head.head_block_root())
+            .await;
+        assert!(rig
+            .canonical_head()
+            .head_execution_status()
+            .unwrap()
+            .is_invalid());
+
+        // Finding a new head should fail since the only possible head is not valid.
+ rig.assert_get_head_error_contains("InvalidBestNode"); + + // Build three "fork" blocks that conflict with the current canonical head. Don't apply them to + // the chain yet. + let mut fork_blocks = vec![]; + let mut parent_state = rig + .harness + .chain + .state_at_slot( + invalid_head.head_slot() - 3, + StateSkipConfig::WithStateRoots, + ) + .unwrap(); + for _ in 0..3 { + let slot = parent_state.slot() + 1; + let (fork_block, post_state) = rig.harness.make_block(parent_state, slot).await; + parent_state = post_state; + fork_blocks.push(Arc::new(fork_block)) + } + + Self { + rig, + fork_blocks, + invalid_head, + } + } +} + +#[tokio::test] +async fn recover_from_invalid_head_by_importing_blocks() { + let InvalidHeadSetup { + rig, + fork_blocks, + invalid_head, + } = InvalidHeadSetup::new().await; + + // Import the first two blocks, they should not become the head. + for i in 0..2 { + if i == 0 { + // The first block should be `VALID` during import. + rig.harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .all_payloads_valid_on_new_payload(); + } else { + // All blocks after the first block should return `SYNCING`. + rig.harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .all_payloads_syncing_on_new_payload(true); + } + + rig.harness + .chain + .process_block(fork_blocks[i].clone(), CountUnrealized::True) + .await + .unwrap(); + rig.recompute_head().await; + rig.assert_get_head_error_contains("InvalidBestNode"); + let new_head = rig.cached_head(); + assert_eq!( + new_head.head_block_root(), + invalid_head.head_block_root(), + "the head should not change" + ); + } + + // Import the third block, it should become the head. 
+ rig.harness + .chain + .process_block(fork_blocks[2].clone(), CountUnrealized::True) + .await + .unwrap(); + rig.recompute_head().await; + let new_head = rig.cached_head(); + assert_eq!( + new_head.head_block_root(), + fork_blocks[2].canonical_root(), + "the third block should become the head" + ); + + let manual_get_head = rig + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .get_head(rig.harness.chain.slot().unwrap(), &rig.harness.chain.spec) + .unwrap(); + assert_eq!(manual_get_head, new_head.head_block_root(),); +} + +#[tokio::test] +async fn recover_from_invalid_head_after_persist_and_reboot() { + let InvalidHeadSetup { + rig, + fork_blocks: _, + invalid_head, + } = InvalidHeadSetup::new().await; + + // Forcefully persist the head and fork choice. + rig.harness.chain.persist_head_and_fork_choice().unwrap(); + + let resumed = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .resumed_ephemeral_store(rig.harness.chain.store.clone()) + .mock_execution_layer() + .build(); + + // Forget the original rig so we don't accidentally use it again. + drop(rig); + + let resumed_head = resumed.chain.canonical_head.cached_head(); + assert_eq!( + resumed_head.head_block_root(), + invalid_head.head_block_root(), + "the resumed harness should have the invalid block as the head" + ); + assert!( + resumed + .chain + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&resumed_head.head_block_root()) + .unwrap() + .is_strictly_optimistic(), + "the invalid block should have become optimistic" + ); +} + +#[tokio::test] +async fn weights_after_resetting_optimistic_status() { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid).await; // Import a valid transition block. 
+
+    let mut roots = vec![];
+    for _ in 0..4 {
+        roots.push(rig.import_block(Payload::Syncing).await);
+    }
+
+    rig.recompute_head().await;
+    let head = rig.cached_head();
+
+    let original_weights = rig
+        .harness
+        .chain
+        .canonical_head
+        .fork_choice_read_lock()
+        .proto_array()
+        .iter_nodes(&head.head_block_root())
+        .map(|node| (node.root, node.weight))
+        .collect::<Vec<_>>();
+
+    rig.invalidate_manually(roots[1]).await;
+
+    rig.harness
+        .chain
+        .canonical_head
+        .fork_choice_write_lock()
+        .proto_array_mut()
+        .set_all_blocks_to_optimistic::<E>(&rig.harness.chain.spec)
+        .unwrap();
+
+    let new_weights = rig
+        .harness
+        .chain
+        .canonical_head
+        .fork_choice_read_lock()
+        .proto_array()
+        .iter_nodes(&head.head_block_root())
+        .map(|node| (node.root, node.weight))
+        .collect::<Vec<_>>();
+
+    assert_eq!(original_weights, new_weights);
+
+    // Advance the current slot and run fork choice to remove proposer boost.
+    rig.harness
+        .set_current_slot(rig.harness.chain.slot().unwrap() + 1);
+    rig.recompute_head().await;
+
+    assert_eq!(
+        rig.harness
+            .chain
+            .canonical_head
+            .fork_choice_read_lock()
+            .get_block_weight(&head.head_block_root())
+            .unwrap(),
+        head.snapshot.beacon_state.validators()[0].effective_balance,
+        "proposer boost should be removed from the head block and the vote of a single validator applied"
+    );
+
+    // Import a length of chain to ensure the chain can be built atop.
+ for _ in 0..E::slots_per_epoch() * 4 { + rig.import_block(Payload::Valid).await; + } +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 560e865a8f..d9d5ca20d7 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -10,6 +10,7 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler, WhenSlotSkipped, }; +use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; @@ -2124,10 +2125,10 @@ async fn weak_subjectivity_sync() { beacon_chain.slot_clock.set_slot(block.slot().as_u64()); beacon_chain - .process_block(Arc::new(full_block)) + .process_block(Arc::new(full_block), CountUnrealized::True) .await .unwrap(); - beacon_chain.recompute_head_at_current_slot().await.unwrap(); + beacon_chain.recompute_head_at_current_slot().await; // Check that the new block's state can be loaded correctly. let state_root = block.state_root(); @@ -2459,11 +2460,7 @@ async fn revert_minority_fork_on_resume() { .build(); // Head should now be just before the fork. - resumed_harness - .chain - .recompute_head_at_current_slot() - .await - .unwrap(); + resumed_harness.chain.recompute_head_at_current_slot().await; assert_eq!(resumed_harness.head_slot(), fork_slot - 1); // Head track should know the canonical head and the rogue head. @@ -2481,11 +2478,7 @@ async fn revert_minority_fork_on_resume() { .unwrap(); // The canonical head should be the block from the majority chain. 
- resumed_harness - .chain - .recompute_head_at_current_slot() - .await - .unwrap(); + resumed_harness.chain.recompute_head_at_current_slot().await; assert_eq!(resumed_harness.head_slot(), block.slot()); assert_eq!(resumed_harness.head_block_root(), block.canonical_root()); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index f98580db3f..f7d443748d 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -8,6 +8,7 @@ use beacon_chain::{ }, BeaconChain, StateSkipConfig, WhenSlotSkipped, }; +use fork_choice::CountUnrealized; use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{ @@ -499,7 +500,7 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() { // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap()) + .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) .unwrap(); let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) @@ -613,7 +614,7 @@ async fn unaggregated_attestations_added_to_fork_choice_all_updated() { // Move forward a slot so all queued attestations can be processed. 
harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap()) + .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) .unwrap(); let validators: Vec = (0..VALIDATOR_COUNT).collect(); @@ -683,17 +684,16 @@ async fn run_skip_slot_test(skip_slots: u64) { assert_eq!( harness_b .chain - .process_block(harness_a.chain.head_snapshot().beacon_block.clone()) + .process_block( + harness_a.chain.head_snapshot().beacon_block.clone(), + CountUnrealized::True + ) .await .unwrap(), harness_a.chain.head_snapshot().beacon_block_root ); - harness_b - .chain - .recompute_head_at_current_slot() - .await - .expect("should run fork choice"); + harness_b.chain.recompute_head_at_current_slot().await; assert_eq!( harness_b.chain.head_snapshot().beacon_block.slot(), diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 500f5aa9ff..3517d06b15 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,4 +1,3 @@ -use eth2::ok_or_error; use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, @@ -6,23 +5,33 @@ use eth2::types::{ Slot, }; pub use eth2::Error; +use eth2::{ok_or_error, StatusCode}; use reqwest::{IntoUrl, Response}; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde::Serialize; use std::time::Duration; -pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 500; +pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; + +/// This timeout is in accordance with v0.2.0 of the [builder specs](https://github.com/flashbots/mev-boost/pull/20). 
+pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000; #[derive(Clone)] pub struct Timeouts { get_header: Duration, + post_validators: Duration, + post_blinded_blocks: Duration, + get_builder_status: Duration, } impl Default for Timeouts { fn default() -> Self { Self { get_header: Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS), + post_validators: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), + post_blinded_blocks: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), + get_builder_status: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), } } } @@ -51,14 +60,6 @@ impl BuilderHttpClient { }) } - async fn get(&self, url: U) -> Result { - self.get_response_with_timeout(url, None) - .await? - .json() - .await - .map_err(Error::Reqwest) - } - async fn get_with_timeout( &self, url: U, @@ -104,14 +105,13 @@ impl BuilderHttpClient { &self, url: U, body: &T, + timeout: Option, ) -> Result { - let response = self - .client - .post(url) - .json(body) - .send() - .await - .map_err(Error::Reqwest)?; + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.json(body).send().await.map_err(Error::Reqwest)?; ok_or_error(response).await } @@ -129,7 +129,8 @@ impl BuilderHttpClient { .push("builder") .push("validators"); - self.post_generic(path, &validator, None).await?; + self.post_generic(path, &validator, Some(self.timeouts.post_validators)) + .await?; Ok(()) } @@ -148,7 +149,11 @@ impl BuilderHttpClient { .push("blinded_blocks"); Ok(self - .post_with_raw_response(path, &blinded_block) + .post_with_raw_response( + path, + &blinded_block, + Some(self.timeouts.post_blinded_blocks), + ) .await? .json() .await?) 
@@ -160,7 +165,7 @@ impl BuilderHttpClient { slot: Slot, parent_hash: ExecutionBlockHash, pubkey: &PublicKeyBytes, - ) -> Result>, Error> { + ) -> Result>>, Error> { let mut path = self.server.full.clone(); path.path_segments_mut() @@ -173,7 +178,13 @@ impl BuilderHttpClient { .push(format!("{parent_hash:?}").as_str()) .push(pubkey.as_hex_string().as_str()); - self.get_with_timeout(path, self.timeouts.get_header).await + let resp = self.get_with_timeout(path, self.timeouts.get_header).await; + + if matches!(resp, Err(Error::StatusCode(StatusCode::NO_CONTENT))) { + Ok(None) + } else { + resp.map(Some) + } } /// `GET /eth/v1/builder/status` @@ -187,6 +198,7 @@ impl BuilderHttpClient { .push("builder") .push("status"); - self.get(path).await + self.get_with_timeout(path, self.timeouts.get_builder_status) + .await } } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index b7f06183f1..d4c41244d2 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,6 +1,7 @@ use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; +use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ @@ -728,6 +729,7 @@ where } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); + start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone()); } Ok(Client { @@ -849,7 +851,7 @@ where .runtime_context .as_ref() .ok_or("caching_eth1_backend requires a runtime_context")? 
- .service_context("eth1_rpc".into()); + .service_context("deposit_contract_rpc".into()); let beacon_chain_builder = self .beacon_chain_builder .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index b13ca8f489..a5d5b37c7a 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -10,7 +10,7 @@ use types::{Graffiti, PublicKeyBytes}; const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; /// Defines how the client should initialize the `BeaconChain` and other components. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub enum ClientGenesis { /// Creates a genesis state as per the 2019 Canada interop specifications. Interop { @@ -21,6 +21,7 @@ pub enum ClientGenesis { FromStore, /// Connects to an eth1 node and waits until it can create the genesis state from the deposit /// contract. + #[default] DepositContract, /// Loads the genesis state from SSZ-encoded `BeaconState` bytes. /// @@ -38,12 +39,6 @@ pub enum ClientGenesis { }, } -impl Default for ClientGenesis { - fn default() -> Self { - Self::DepositContract - } -} - /// The core configuration of a Lighthouse beacon node. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 9476819a4b..9f82cd2012 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,13 +1,16 @@ use crate::metrics; -use beacon_chain::{BeaconChain, BeaconChainTypes, ExecutionStatus}; +use beacon_chain::{ + merge_readiness::{MergeConfig, MergeReadiness}, + BeaconChain, BeaconChainTypes, ExecutionStatus, +}; use lighthouse_network::{types::SyncState, NetworkGlobals}; -use parking_lot::Mutex; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::{Duration, Instant}; +use tokio::sync::Mutex; use tokio::time::sleep; -use types::{EthSpec, Slot}; +use types::*; /// Create a warning log whenever the peer count is at or below this value. pub const WARN_PEER_COUNT: usize = 1; @@ -77,6 +80,7 @@ pub fn spawn_notifier( // Perform post-genesis logging. let mut last_backfill_log_slot = None; + loop { interval.tick().await; let connected_peer_count = network.connected_peers(); @@ -87,12 +91,12 @@ pub fn spawn_notifier( match (current_sync_state, &sync_state) { (_, SyncState::BackFillSyncing { .. }) => { // We have transitioned to a backfill sync. Reset the speedo. - let mut speedo = speedo.lock(); + let mut speedo = speedo.lock().await; speedo.clear(); } (SyncState::BackFillSyncing { .. }, _) => { // We have transitioned from a backfill sync, reset the speedo - let mut speedo = speedo.lock(); + let mut speedo = speedo.lock().await; speedo.clear(); } (_, _) => {} @@ -125,7 +129,7 @@ pub fn spawn_notifier( // progress. let mut sync_distance = current_slot - head_slot; - let mut speedo = speedo.lock(); + let mut speedo = speedo.lock().await; match current_sync_state { SyncState::BackFillSyncing { .. } => { // Observe backfilling sync info. 
@@ -306,6 +310,7 @@ pub fn spawn_notifier( } eth1_logging(&beacon_chain, &log); + merge_readiness_logging(current_slot, &beacon_chain, &log).await; } }; @@ -315,6 +320,88 @@ pub fn spawn_notifier( Ok(()) } +/// Provides some helpful logging to users to indicate if their node is ready for the Bellatrix +/// fork and subsequent merge transition. +async fn merge_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let merge_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_block + .message() + .body() + .execution_payload() + .map_or(false, |payload| { + payload.parent_hash() != ExecutionBlockHash::zero() + }); + + if merge_completed || !beacon_chain.is_time_to_prepare_for_bellatrix(current_slot) { + return; + } + + match beacon_chain.check_merge_readiness().await { + MergeReadiness::Ready { + config, + current_difficulty, + } => match config { + MergeConfig { + terminal_total_difficulty: Some(ttd), + terminal_block_hash: None, + terminal_block_hash_epoch: None, + } => { + info!( + log, + "Ready for the merge"; + "terminal_total_difficulty" => %ttd, + "current_difficulty" => current_difficulty + .map(|d| d.to_string()) + .unwrap_or_else(|| "??".into()), + ) + } + MergeConfig { + terminal_total_difficulty: _, + terminal_block_hash: Some(terminal_block_hash), + terminal_block_hash_epoch: Some(terminal_block_hash_epoch), + } => { + info!( + log, + "Ready for the merge"; + "info" => "you are using override parameters, please ensure that you \ + understand these parameters and their implications.", + "terminal_block_hash" => ?terminal_block_hash, + "terminal_block_hash_epoch" => ?terminal_block_hash_epoch, + ) + } + other => error!( + log, + "Inconsistent merge configuration"; + "config" => ?other + ), + }, + readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed { error: _ } => { + error!( + log, + "Not ready for merge"; + "info" => %readiness, + ) + } + readiness @ 
MergeReadiness::NotSynced => warn!( + log, + "Not ready for merge"; + "info" => %readiness, + ), + readiness @ MergeReadiness::NoExecutionEndpoint => warn!( + log, + "Not ready for merge"; + "info" => %readiness, + ), + } +} + fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); @@ -354,14 +441,14 @@ fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger warn!( log, - "Syncing eth1 block cache"; + "Syncing deposit contract block cache"; "est_blocks_remaining" => distance, ); } } else { error!( log, - "Unable to determine eth1 sync status"; + "Unable to determine deposit contract sync status"; ); } } diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 36a637d2ae..a4d4e5e254 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -14,7 +14,7 @@ use futures::future::TryFutureExt; use parking_lot::{RwLock, RwLockReadGuard}; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, info, trace, warn, Logger}; +use slog::{debug, error, info, trace, warn, Logger}; use std::fmt::Debug; use std::future::Future; use std::ops::{Range, RangeInclusive}; @@ -39,8 +39,6 @@ const GET_BLOCK_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; /// Timeout when doing an eth_getLogs to read the deposit contract logs. const GET_DEPOSIT_LOG_TIMEOUT_MILLIS: u64 = 60_000; -const WARNING_MSG: &str = "BLOCK PROPOSALS WILL FAIL WITHOUT VALID, SYNCED ETH1 CONNECTION"; - /// Number of blocks to download if the node detects it is lagging behind due to an inaccurate /// relationship between block-number-based follow distance and time-based follow distance. 
const CATCHUP_BATCH_SIZE: u64 = 128; @@ -202,7 +200,7 @@ async fn endpoint_state( if chain_id == Eth1Id::Custom(0) { warn!( log, - "Remote eth1 node is not synced"; + "Remote execution node is not synced"; "endpoint" => %endpoint, "action" => "trying fallbacks" ); @@ -211,11 +209,11 @@ async fn endpoint_state( if &chain_id != config_chain_id { warn!( log, - "Invalid eth1 chain id. Please switch to correct chain id on endpoint"; + "Invalid execution chain ID. Please switch to correct chain ID on endpoint"; "endpoint" => %endpoint, "action" => "trying fallbacks", - "expected" => format!("{:?}",config_chain_id), - "received" => format!("{:?}", chain_id), + "expected" => ?config_chain_id, + "received" => ?chain_id, ); Err(EndpointError::WrongChainId) } else { @@ -252,7 +250,7 @@ async fn get_remote_head_and_new_block_ranges( if remote_head_block.timestamp + node_far_behind_seconds < now { warn!( service.log, - "Eth1 endpoint is not synced"; + "Execution endpoint is not synced"; "endpoint" => %endpoint, "last_seen_block_unix_timestamp" => remote_head_block.timestamp, "action" => "trying fallback" @@ -264,7 +262,7 @@ async fn get_remote_head_and_new_block_ranges( if let SingleEndpointError::RemoteNotSynced { .. } = e { warn!( service.log, - "Eth1 endpoint is not synced"; + "Execution endpoint is not synced"; "endpoint" => %endpoint, "action" => "trying fallbacks" ); @@ -749,15 +747,11 @@ impl Service { .iter() .all(|error| matches!(error, SingleEndpointError::EndpointError(_))) { - crit!( + error!( self.log, - "Could not connect to a suitable eth1 node. Please ensure that you have \ - an eth1 http server running locally on http://localhost:8545 or specify \ - one or more (remote) endpoints using \ - `--eth1-endpoints `. 
\ - Also ensure that `eth` and `net` apis are enabled on the eth1 http \ - server"; - "warning" => WARNING_MSG + "No synced execution endpoint"; + "advice" => "ensure you have an execution node configured via \ + --execution-endpoint or if pre-merge, --eth1-endpoints" ); } } @@ -778,12 +772,7 @@ impl Service { get_remote_head_and_new_block_ranges(e, self, node_far_behind_seconds).await }) .await - .map_err(|e| { - format!( - "Failed to update Eth1 service: {:?}", - process_single_err(&e) - ) - })?; + .map_err(|e| format!("{:?}", process_single_err(&e)))?; if num_errors > 0 { info!(self.log, "Fetched data from fallback"; "fallback_number" => num_errors); @@ -815,16 +804,15 @@ impl Service { deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number(); } - let outcome = outcome_result.map_err(|e| { - format!("Failed to update eth1 deposit cache: {:?}", process_err(e)) - })?; + let outcome = outcome_result + .map_err(|e| format!("Failed to update deposit cache: {:?}", process_err(e)))?; trace!( self.log, - "Updated eth1 deposit cache"; + "Updated deposit cache"; "cached_deposits" => self.inner.deposit_cache.read().cache.len(), "logs_imported" => outcome.logs_imported, - "last_processed_eth1_block" => self.inner.deposit_cache.read().last_processed_block, + "last_processed_execution_block" => self.inner.deposit_cache.read().last_processed_block, ); Ok::<_, String>(outcome) }; @@ -833,11 +821,16 @@ impl Service { let outcome = self .update_block_cache(Some(new_block_numbers_block_cache), &endpoints) .await - .map_err(|e| format!("Failed to update eth1 block cache: {:?}", process_err(e)))?; + .map_err(|e| { + format!( + "Failed to update deposit contract block cache: {:?}", + process_err(e) + ) + })?; trace!( self.log, - "Updated eth1 block cache"; + "Updated deposit contract block cache"; "cached_blocks" => self.inner.block_cache.read().len(), "blocks_imported" => outcome.blocks_imported, "head_block" => outcome.head_block_number, @@ -890,13 +883,13 @@ 
impl Service { match update_result { Err(e) => error!( self.log, - "Failed to update eth1 cache"; + "Error updating deposit contract cache"; "retry_millis" => update_interval.as_millis(), "error" => e, ), Ok((deposit, block)) => debug!( self.log, - "Updated eth1 cache"; + "Updated deposit contract cache"; "retry_millis" => update_interval.as_millis(), "blocks" => format!("{:?}", block), "deposits" => format!("{:?}", deposit), @@ -908,11 +901,12 @@ impl Service { /// Returns the range of new block numbers to be considered for the given head type. fn relevant_new_block_numbers( &self, - remote_highest_block: u64, + remote_highest_block_number: u64, remote_highest_block_timestamp: Option, head_type: HeadType, ) -> Result>, SingleEndpointError> { let follow_distance = self.cache_follow_distance(); + let latest_cached_block = self.latest_cached_block(); let next_required_block = match head_type { HeadType::Deposit => self .deposits() @@ -920,18 +914,14 @@ impl Service { .last_processed_block .map(|n| n + 1) .unwrap_or_else(|| self.config().deposit_contract_deploy_block), - HeadType::BlockCache => self - .inner - .block_cache - .read() - .highest_block_number() - .map(|n| n + 1) + HeadType::BlockCache => latest_cached_block + .as_ref() + .map(|block| block.number + 1) .unwrap_or_else(|| self.config().lowest_cached_block_number), }; - let latest_cached_block = self.latest_cached_block(); relevant_block_range( - remote_highest_block, + remote_highest_block_number, remote_highest_block_timestamp, next_required_block, follow_distance, @@ -1183,7 +1173,7 @@ impl Service { debug!( self.log, - "Downloading eth1 blocks"; + "Downloading execution blocks"; "first" => ?required_block_numbers.first(), "last" => ?required_block_numbers.last(), ); @@ -1246,7 +1236,7 @@ impl Service { if blocks_imported > 0 { debug!( self.log, - "Imported eth1 block(s)"; + "Imported execution block(s)"; "latest_block_age" => latest_block_mins, "latest_block" => block_cache.highest_block_number(), 
"total_cached_blocks" => block_cache.len(), @@ -1255,7 +1245,7 @@ impl Service { } else { debug!( self.log, - "No new eth1 blocks imported"; + "No new execution blocks imported"; "latest_block" => block_cache.highest_block_number(), "cached_blocks" => block_cache.len(), ); @@ -1293,9 +1283,12 @@ fn relevant_block_range( let lagging = latest_cached_block.timestamp + cache_follow_distance * spec.seconds_per_eth1_block < remote_highest_block_timestamp; - let end_block = std::cmp::min( - remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE), - next_required_block + CATCHUP_BATCH_SIZE, + let end_block = std::cmp::max( + std::cmp::min( + remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE), + next_required_block + CATCHUP_BATCH_SIZE, + ), + remote_highest_block_number.saturating_sub(cache_follow_distance), ); if lagging && next_required_block <= end_block { return Ok(Some(next_required_block..=end_block)); diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index c181c19050..83f9454f8a 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -39,3 +39,8 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } builder_client = { path = "../builder_client" } +fork_choice = { path = "../../consensus/fork_choice" } +mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", tag = "v0.2.0"} +ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus"} +ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs"} + diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index a1e769e3e3..4f957d6387 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -71,8 +71,6 @@ impl From for Error 
{ } } -pub struct EngineApi; - #[derive(Clone, Copy, Debug, PartialEq)] pub enum PayloadStatusV1Status { Valid, @@ -108,6 +106,8 @@ pub struct ExecutionBlock { pub block_number: u64, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub timestamp: u64, } /// Representation of an exection block with enough detail to reconstruct a payload. diff --git a/beacon_node/execution_layer/src/engine_api/auth.rs b/beacon_node/execution_layer/src/engine_api/auth.rs index 560e43585b..8fcdb2543d 100644 --- a/beacon_node/execution_layer/src/engine_api/auth.rs +++ b/beacon_node/execution_layer/src/engine_api/auth.rs @@ -25,7 +25,7 @@ impl From for Error { } /// Provides wrapper around `[u8; JWT_SECRET_LENGTH]` that implements `Zeroize`. -#[derive(Zeroize)] +#[derive(Zeroize, Clone)] #[zeroize(drop)] pub struct JwtKey([u8; JWT_SECRET_LENGTH as usize]); @@ -159,12 +159,12 @@ pub struct Claims { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::JWT_SECRET; + use crate::test_utils::DEFAULT_JWT_SECRET; #[test] fn test_roundtrip() { let auth = Auth::new( - JwtKey::from_slice(&JWT_SECRET).unwrap(), + JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), Some("42".into()), Some("Lighthouse".into()), ); @@ -172,7 +172,7 @@ mod tests { let token = auth.generate_token_with_claims(&claims).unwrap(); assert_eq!( - Auth::validate_token(&token, &JwtKey::from_slice(&JWT_SECRET).unwrap()) + Auth::validate_token(&token, &JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()) .unwrap() .claims, claims diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 832771460e..a8eb42971e 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -7,7 +7,6 @@ use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; -use 
std::marker::PhantomData; use std::time::Duration; use types::EthSpec; @@ -169,7 +168,7 @@ pub mod deposit_log { /// state of the deposit contract. pub mod deposit_methods { use super::Log; - use crate::{EngineApi, HttpJsonRpc}; + use crate::HttpJsonRpc; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; use std::fmt; @@ -298,7 +297,7 @@ pub mod deposit_methods { } } - impl HttpJsonRpc { + impl HttpJsonRpc { /// Get the eth1 chain id of the given endpoint. pub async fn get_chain_id(&self, timeout: Duration) -> Result { let chain_id: String = self @@ -517,20 +516,18 @@ pub mod deposit_methods { } } -pub struct HttpJsonRpc { +pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, auth: Option, - _phantom: PhantomData, } -impl HttpJsonRpc { +impl HttpJsonRpc { pub fn new(url: SensitiveUrl) -> Result { Ok(Self { client: Client::builder().build()?, url, auth: None, - _phantom: PhantomData, }) } @@ -539,7 +536,6 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, auth: Some(auth), - _phantom: PhantomData, }) } @@ -592,7 +588,7 @@ impl std::fmt::Display for HttpJsonRpc { } } -impl HttpJsonRpc { +impl HttpJsonRpc { pub async fn upcheck(&self) -> Result<(), Error> { let result: serde_json::Value = self .rpc_request(ETH_SYNCING, json!([]), ETH_SYNCING_TIMEOUT) @@ -712,7 +708,7 @@ impl HttpJsonRpc { mod test { use super::auth::JwtKey; use super::*; - use crate::test_utils::{MockServer, JWT_SECRET}; + use crate::test_utils::{MockServer, DEFAULT_JWT_SECRET}; use std::future::Future; use std::str::FromStr; use std::sync::Arc; @@ -732,8 +728,10 @@ mod test { let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); // Create rpc clients that include JWT auth headers if `with_auth` is true. 
let (rpc_client, echo_client) = if with_auth { - let rpc_auth = Auth::new(JwtKey::from_slice(&JWT_SECRET).unwrap(), None, None); - let echo_auth = Auth::new(JwtKey::from_slice(&JWT_SECRET).unwrap(), None, None); + let rpc_auth = + Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); + let echo_auth = + Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); ( Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth).unwrap()), Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth).unwrap()), diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 5414c52623..9ed38b61b0 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,9 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; -use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayloadHeader, FixedVector, Transaction, Unsigned, - VariableList, -}; +use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -78,6 +75,7 @@ pub struct JsonExecutionPayloadHeaderV1 { pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, + #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, pub transactions_root: Hash256, @@ -142,6 +140,7 @@ pub struct JsonExecutionPayloadV1 { pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, + #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] @@ -430,62 +429,10 @@ impl From for JsonForkchoiceUpdatedV1Response { } } -#[derive(Debug, PartialEq, Serialize, 
Deserialize)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum JsonProposeBlindedBlockResponseStatus { - Valid, - Invalid, - Syncing, -} -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(bound = "E: EthSpec")] -pub struct JsonProposeBlindedBlockResponse { - pub result: ExecutionPayload, - pub error: Option, -} - -impl From> for ExecutionPayload { - fn from(j: JsonProposeBlindedBlockResponse) -> Self { - let JsonProposeBlindedBlockResponse { result, error: _ } = j; - result - } -} - -impl From for ProposeBlindedBlockResponseStatus { - fn from(j: JsonProposeBlindedBlockResponseStatus) -> Self { - match j { - JsonProposeBlindedBlockResponseStatus::Valid => { - ProposeBlindedBlockResponseStatus::Valid - } - JsonProposeBlindedBlockResponseStatus::Invalid => { - ProposeBlindedBlockResponseStatus::Invalid - } - JsonProposeBlindedBlockResponseStatus::Syncing => { - ProposeBlindedBlockResponseStatus::Syncing - } - } - } -} -impl From for JsonProposeBlindedBlockResponseStatus { - fn from(f: ProposeBlindedBlockResponseStatus) -> Self { - match f { - ProposeBlindedBlockResponseStatus::Valid => { - JsonProposeBlindedBlockResponseStatus::Valid - } - ProposeBlindedBlockResponseStatus::Invalid => { - JsonProposeBlindedBlockResponseStatus::Invalid - } - ProposeBlindedBlockResponseStatus::Syncing => { - JsonProposeBlindedBlockResponseStatus::Syncing - } - } - } -} - #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { + #[serde(with = "eth2_serde_utils::u256_hex_be")] pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, #[serde(with = "eth2_serde_utils::u64_hex_be")] diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 34eef8a3fb..eb188c61f8 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,12 +1,14 
@@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. use crate::engine_api::{ - EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, + Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, }; use crate::HttpJsonRpc; use lru::LruCache; -use slog::{crit, debug, info, warn, Logger}; +use slog::{debug, error, info, Logger}; use std::future::Future; +use std::sync::Arc; +use task_executor::TaskExecutor; use tokio::sync::{Mutex, RwLock}; use types::{Address, ExecutionBlockHash, Hash256}; @@ -16,7 +18,7 @@ use types::{Address, ExecutionBlockHash, Hash256}; const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; /// Stores the remembered state of a engine. -#[derive(Copy, Clone, PartialEq)] +#[derive(Copy, Clone, PartialEq, Debug)] enum EngineState { Synced, Offline, @@ -31,22 +33,6 @@ pub struct ForkChoiceState { pub finalized_block_hash: ExecutionBlockHash, } -/// Used to enable/disable logging on some tasks. -#[derive(Copy, Clone, PartialEq)] -pub enum Logging { - Enabled, - Disabled, -} - -impl Logging { - pub fn is_enabled(&self) -> bool { - match self { - Logging::Enabled => true, - Logging::Disabled => false, - } - } -} - #[derive(Hash, PartialEq, std::cmp::Eq)] struct PayloadIdCacheKey { pub head_block_hash: ExecutionBlockHash, @@ -55,20 +41,34 @@ struct PayloadIdCacheKey { pub suggested_fee_recipient: Address, } -/// An execution engine. -pub struct Engine { - pub api: HttpJsonRpc, - payload_id_cache: Mutex>, - state: RwLock, +#[derive(Debug)] +pub enum EngineError { + Offline, + Api { error: EngineApiError }, + BuilderApi { error: EngineApiError }, + Auth, } -impl Engine { +/// An execution engine. +pub struct Engine { + pub api: HttpJsonRpc, + payload_id_cache: Mutex>, + state: RwLock, + pub latest_forkchoice_state: RwLock>, + pub executor: TaskExecutor, + pub log: Logger, +} + +impl Engine { /// Creates a new, offline engine. 
- pub fn new(api: HttpJsonRpc) -> Self { + pub fn new(api: HttpJsonRpc, executor: TaskExecutor, log: &Logger) -> Self { Self { api, payload_id_cache: Mutex::new(LruCache::new(PAYLOAD_ID_LRU_CACHE_SIZE)), state: RwLock::new(EngineState::Offline), + latest_forkchoice_state: Default::default(), + executor, + log: log.clone(), } } @@ -90,9 +90,7 @@ impl Engine { }) .cloned() } -} -impl Engine { pub async fn notify_forkchoice_updated( &self, forkchoice_state: ForkChoiceState, @@ -120,26 +118,7 @@ impl Engine { Ok(response) } -} -// This structure used to hold multiple execution engines managed in a fallback manner. This -// functionality has been removed following https://github.com/sigp/lighthouse/issues/3118 and this -// struct will likely be removed in the future. -pub struct Engines { - pub engine: Engine, - pub latest_forkchoice_state: RwLock>, - pub log: Logger, -} - -#[derive(Debug)] -pub enum EngineError { - Offline, - Api { error: EngineApiError }, - BuilderApi { error: EngineApiError }, - Auth, -} - -impl Engines { async fn get_latest_forkchoice_state(&self) -> Option { *self.latest_forkchoice_state.read().await } @@ -169,12 +148,7 @@ impl Engines { // For simplicity, payload attributes are never included in this call. It may be // reasonable to include them in the future. - if let Err(e) = self - .engine - .api - .forkchoice_updated_v1(forkchoice_state, None) - .await - { + if let Err(e) = self.api.forkchoice_updated_v1(forkchoice_state, None).await { debug!( self.log, "Failed to issue latest head to engine"; @@ -191,166 +165,119 @@ impl Engines { /// Returns `true` if the engine has a "synced" status. pub async fn is_synced(&self) -> bool { - *self.engine.state.read().await == EngineState::Synced + *self.state.read().await == EngineState::Synced } + /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. 
- pub async fn upcheck_not_synced(&self, logging: Logging) { - let mut state_lock = self.engine.state.write().await; - if *state_lock != EngineState::Synced { - match self.engine.api.upcheck().await { - Ok(()) => { - if logging.is_enabled() { - info!( - self.log, - "Execution engine online"; - ); - } + pub async fn upcheck(&self) { + let state: EngineState = match self.api.upcheck().await { + Ok(()) => { + let mut state = self.state.write().await; + + if *state != EngineState::Synced { + info!( + self.log, + "Execution engine online"; + ); + // Send the node our latest forkchoice_state. self.send_latest_forkchoice_state().await; - - *state_lock = EngineState::Synced + } else { + debug!( + self.log, + "Execution engine online"; + ); } - Err(EngineApiError::IsSyncing) => { - if logging.is_enabled() { - warn!( - self.log, - "Execution engine syncing"; - ) - } - // Send the node our latest forkchoice_state, it may assist with syncing. - self.send_latest_forkchoice_state().await; - - *state_lock = EngineState::Syncing - } - Err(EngineApiError::Auth(err)) => { - if logging.is_enabled() { - warn!( - self.log, - "Failed jwt authorization"; - "error" => ?err, - ); - } - - *state_lock = EngineState::AuthFailed - } - Err(e) => { - if logging.is_enabled() { - warn!( - self.log, - "Execution engine offline"; - "error" => ?e, - ) - } - } + *state = EngineState::Synced; + *state } - } + Err(EngineApiError::IsSyncing) => { + let mut state = self.state.write().await; + *state = EngineState::Syncing; + *state + } + Err(EngineApiError::Auth(err)) => { + error!( + self.log, + "Failed jwt authorization"; + "error" => ?err, + ); - if *state_lock != EngineState::Synced && logging.is_enabled() { - crit!( - self.log, - "No synced execution engines"; - ) - } - } - - /// Run `func` on the node. - /// - /// This function might try to run `func` twice. If the node returns an error it will try to - /// upcheck it and then run the function again. 
- pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result - where - F: Fn(&'a Engine) -> G + Copy, - G: Future>, - { - match self.first_success_without_retry(func).await { - Ok(result) => Ok(result), + let mut state = self.state.write().await; + *state = EngineState::AuthFailed; + *state + } Err(e) => { - debug!(self.log, "First engine call failed. Retrying"; "err" => ?e); - // Try to recover the node. - self.upcheck_not_synced(Logging::Enabled).await; - // Try again. - self.first_success_without_retry(func).await - } - } - } + error!( + self.log, + "Error during execution engine upcheck"; + "error" => ?e, + ); - /// Run `func` on the node. - pub async fn first_success_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Result - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let (engine_synced, engine_auth_failed) = { - let state = self.engine.state.read().await; - ( - *state == EngineState::Synced, - *state == EngineState::AuthFailed, - ) + let mut state = self.state.write().await; + *state = EngineState::Offline; + *state + } }; - if engine_synced { - match func(&self.engine).await { - Ok(result) => Ok(result), - Err(error) => { - debug!( - self.log, - "Execution engine call failed"; - "error" => ?error, - ); - *self.engine.state.write().await = EngineState::Offline; - Err(EngineError::Api { error }) - } - } - } else if engine_auth_failed { - Err(EngineError::Auth) - } else { - Err(EngineError::Offline) - } + + debug!( + self.log, + "Execution engine upcheck complete"; + "state" => ?state, + ); } - /// Runs `func` on the node. + /// Run `func` on the node regardless of the node's current state. /// - /// This function might try to run `func` twice. If all nodes return an error on the first time - /// it runs, it will try to upcheck all offline nodes and then run the function again. 
- pub async fn broadcast<'a, F, G, H>(&'a self, func: F) -> Result + /// ## Note + /// + /// This function takes locks on `self.state`, holding a conflicting lock might cause a + /// deadlock. + pub async fn request<'a, F, G, H>(self: &'a Arc, func: F) -> Result where - F: Fn(&'a Engine) -> G + Copy, + F: Fn(&'a Engine) -> G, G: Future>, { - match self.broadcast_without_retry(func).await { - Err(EngineError::Offline { .. }) => { - self.upcheck_not_synced(Logging::Enabled).await; - self.broadcast_without_retry(func).await - } - other => other, - } - } + match func(self).await { + Ok(result) => { + // Take a clone *without* holding the read-lock since the `upcheck` function will + // take a write-lock. + let state: EngineState = *self.state.read().await; - /// Runs `func` on the node if it's last state is not offline. - pub async fn broadcast_without_retry<'a, F, G, H>(&'a self, func: F) -> Result - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let func = &func; - if *self.engine.state.read().await == EngineState::Offline { - Err(EngineError::Offline) - } else { - match func(&self.engine).await { - Ok(res) => Ok(res), - Err(error) => { - debug!( - self.log, - "Execution engine call failed"; - "error" => ?error, + // If this request just returned successfully but we don't think this node is + // synced, check to see if it just became synced. This helps to ensure that the + // networking stack can get fast feedback about a synced engine. + if state != EngineState::Synced { + // Spawn the upcheck in another task to avoid slowing down this request. 
+ let inner_self = self.clone(); + self.executor.spawn( + async move { inner_self.upcheck().await }, + "upcheck_after_success", ); - *self.engine.state.write().await = EngineState::Offline; - Err(EngineError::Api { error }) } + + Ok(result) + } + Err(error) => { + error!( + self.log, + "Execution engine call failed"; + "error" => ?error, + ); + + // The node just returned an error, run an upcheck so we can update the endpoint + // state. + // + // Spawn the upcheck in another task to avoid slowing down this request. + let inner_self = self.clone(); + self.executor.spawn( + async move { inner_self.upcheck().await }, + "upcheck_after_error", + ); + + Err(EngineError::Api { error }) } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 8897f8f67a..aea952a57d 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,13 +4,15 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. 
+use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; use engine_api::Error as ApiError; pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; pub use engines::ForkChoiceState; -use engines::{Engine, EngineError, Engines, Logging}; +use engines::{Engine, EngineError}; +use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; use payload_status::process_payload_status; pub use payload_status::PayloadStatus; @@ -27,16 +29,17 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::{ sync::{Mutex, MutexGuard, RwLock}, - time::{sleep, sleep_until, Instant}, + time::sleep, }; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, + BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, }; mod engine_api; mod engines; mod metrics; +pub mod payload_cache; mod payload_status; pub mod test_utils; @@ -64,10 +67,11 @@ const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); #[derive(Debug)] pub enum Error { - NoEngines, + NoEngine, NoPayloadBuilder, ApiError(ApiError), Builder(builder_client::Error), + NoHeaderFromBuilder, EngineError(Box), NotSynced, ShuttingDown, @@ -100,8 +104,28 @@ pub struct Proposer { payload_attributes: PayloadAttributes, } +/// Information from the beacon chain that is necessary for querying the builder API. +pub struct BuilderParams { + pub pubkey: PublicKeyBytes, + pub slot: Slot, + pub chain_health: ChainHealth, +} + +pub enum ChainHealth { + Healthy, + Unhealthy(FailedCondition), + PreMerge, +} + +#[derive(Debug)] +pub enum FailedCondition { + Skips, + SkipsPerEpoch, + EpochsSinceFinalization, +} + struct Inner { - engines: Engines, + engine: Arc, builder: Option, execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option
, @@ -109,7 +133,7 @@ struct Inner { execution_blocks: Mutex>, proposers: RwLock>, executor: TaskExecutor, - phantom: std::marker::PhantomData, + payload_cache: PayloadCache, log: Logger, } @@ -132,22 +156,15 @@ pub struct Config { pub default_datadir: PathBuf, } -/// Provides access to one or more execution engines and provides a neat interface for consumption -/// by the `BeaconChain`. -/// -/// When there is more than one execution node specified, the others will be used in a "fallback" -/// fashion. Some requests may be broadcast to all nodes and others might only be sent to the first -/// node that returns a valid response. Ultimately, the purpose of fallback nodes is to provide -/// redundancy in the case where one node is offline. -/// -/// The fallback nodes have an ordering. The first supplied will be the first contacted, and so on. +/// Provides access to one execution engine and provides a neat interface for consumption by the +/// `BeaconChain`. #[derive(Clone)] pub struct ExecutionLayer { inner: Arc>, } impl ExecutionLayer { - /// Instantiate `Self` with Execution engines specified using `Config`, all using the JSON-RPC via HTTP. + /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { let Config { execution_endpoints: urls, @@ -162,7 +179,7 @@ impl ExecutionLayer { if urls.len() > 1 { warn!(log, "Only the first execution engine url will be used"); } - let execution_url = urls.into_iter().next().ok_or(Error::NoEngines)?; + let execution_url = urls.into_iter().next().ok_or(Error::NoEngine)?; // Use the default jwt secret path if not provided via cli. 
let secret_file = secret_files @@ -198,12 +215,11 @@ impl ExecutionLayer { .map_err(Error::InvalidJWTSecret) }?; - let engine: Engine = { + let engine: Engine = { let auth = Auth::new(jwt_key, jwt_id, jwt_version); debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); - let api = HttpJsonRpc::::new_with_auth(execution_url, auth) - .map_err(Error::ApiError)?; - Engine::::new(api) + let api = HttpJsonRpc::new_with_auth(execution_url, auth).map_err(Error::ApiError)?; + Engine::new(api, executor.clone(), &log) }; let builder = builder_url @@ -211,11 +227,7 @@ impl ExecutionLayer { .transpose()?; let inner = Inner { - engines: Engines { - engine, - latest_forkchoice_state: <_>::default(), - log: log.clone(), - }, + engine: Arc::new(engine), builder, execution_engine_forkchoice_lock: <_>::default(), suggested_fee_recipient, @@ -223,7 +235,7 @@ impl ExecutionLayer { proposers: RwLock::new(HashMap::new()), execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, - phantom: std::marker::PhantomData, + payload_cache: PayloadCache::default(), log, }; @@ -234,18 +246,38 @@ impl ExecutionLayer { } impl ExecutionLayer { - fn engines(&self) -> &Engines { - &self.inner.engines + fn engine(&self) -> &Arc { + &self.inner.engine } pub fn builder(&self) -> &Option { &self.inner.builder } + /// Cache a full payload, keyed on the `tree_hash_root` of its `transactions` field. + fn cache_payload(&self, payload: &ExecutionPayload) -> Option> { + self.inner.payload_cache.put(payload.clone()) + } + + /// Attempt to retrieve a full payload from the payload cache by the `transactions_root`. + pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { + self.inner.payload_cache.pop(root) + } + pub fn executor(&self) -> &TaskExecutor { &self.inner.executor } + /// Get the current difficulty of the PoW chain. 
+ pub async fn get_current_difficulty(&self) -> Result { + let block = self + .engine() + .api + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) + .await? + .ok_or(ApiError::ExecutionHeadBlockNotFound)?; + Ok(block.total_difficulty) + } /// Note: this function returns a mutex guard, be careful to avoid deadlocks. async fn execution_blocks( &self, @@ -281,54 +313,18 @@ impl ExecutionLayer { self.executor().spawn(generate_future(self.clone()), name); } - /// Spawns a routine which attempts to keep the execution engines online. + /// Spawns a routine which attempts to keep the execution engine online. pub fn spawn_watchdog_routine(&self, slot_clock: S) { let watchdog = |el: ExecutionLayer| async move { // Run one task immediately. el.watchdog_task().await; - let recurring_task = - |el: ExecutionLayer, now: Instant, duration_to_next_slot: Duration| async move { - // We run the task three times per slot. - // - // The interval between each task is 1/3rd of the slot duration. This matches nicely - // with the attestation production times (unagg. at 1/3rd, agg at 2/3rd). - // - // Each task is offset by 3/4ths of the interval. - // - // On mainnet, this means we will run tasks at: - // - // - 3s after slot start: 1s before publishing unaggregated attestations. - // - 7s after slot start: 1s before publishing aggregated attestations. - // - 11s after slot start: 1s before the next slot starts. 
- let interval = duration_to_next_slot / 3; - let offset = (interval / 4) * 3; - - let first_execution = duration_to_next_slot + offset; - let second_execution = first_execution + interval; - let third_execution = second_execution + interval; - - sleep_until(now + first_execution).await; - el.engines().upcheck_not_synced(Logging::Disabled).await; - - sleep_until(now + second_execution).await; - el.engines().upcheck_not_synced(Logging::Disabled).await; - - sleep_until(now + third_execution).await; - el.engines().upcheck_not_synced(Logging::Disabled).await; - }; - // Start the loop to periodically update. loop { - if let Some(duration) = slot_clock.duration_to_next_slot() { - let now = Instant::now(); - - // Spawn a new task rather than waiting for this to finish. This ensure that a - // slow run doesn't prevent the next run from starting. - el.spawn(|el| recurring_task(el, now, duration), "exec_watchdog_task"); - } else { - error!(el.log(), "Failed to spawn watchdog task"); - } + el.spawn( + |el| async move { el.watchdog_task().await }, + "exec_watchdog_task", + ); sleep(slot_clock.slot_duration()).await; } }; @@ -338,8 +334,7 @@ impl ExecutionLayer { /// Performs a single execution of the watchdog routine. pub async fn watchdog_task(&self) { - // Disable logging since this runs frequently and may get annoying. - self.engines().upcheck_not_synced(Logging::Disabled).await; + self.engine().upcheck().await; } /// Spawns a routine which cleans the cached proposer data periodically. @@ -399,9 +394,32 @@ impl ExecutionLayer { self.spawn(routine, "exec_config_poll"); } - /// Returns `true` if there is at least one synced and reachable engine. + /// Returns `true` if the execution engine is synced and reachable. pub async fn is_synced(&self) -> bool { - self.engines().is_synced().await + self.engine().is_synced().await + } + + /// Execution nodes return a "SYNCED" response when they do not have any peers. 
+ /// + /// This function is a wrapper over `Self::is_synced` that makes an additional + /// check for the execution layer sync status. Checks if the latest block has + /// a `block_number != 0`. + /// Returns the `Self::is_synced` response if unable to get latest block. + pub async fn is_synced_for_notifier(&self) -> bool { + let synced = self.is_synced().await; + if synced { + if let Ok(Some(block)) = self + .engine() + .api + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) + .await + { + if block.block_number == 0 { + return false; + } + } + } + synced } /// Updates the proposer preparation data provided by validators @@ -465,23 +483,6 @@ impl ExecutionLayer { if let Some(preparation_data_entry) = self.proposer_preparation_data().await.get(&proposer_index) { - if let Some(suggested_fee_recipient) = self.inner.suggested_fee_recipient { - if preparation_data_entry.preparation_data.fee_recipient != suggested_fee_recipient - { - warn!( - self.log(), - "Inconsistent fee recipient"; - "msg" => "The fee recipient returned from the Execution Engine differs \ - from the suggested_fee_recipient set on the beacon node. This could \ - indicate that fees are being diverted to another address. Please \ - ensure that the value of suggested_fee_recipient is set correctly and \ - that the Execution Engine is trusted.", - "proposer_index" => ?proposer_index, - "fee_recipient" => ?preparation_data_entry.preparation_data.fee_recipient, - "suggested_fee_recipient" => ?suggested_fee_recipient, - ) - } - } // The values provided via the API have first priority. 
preparation_data_entry.preparation_data.fee_recipient } else if let Some(address) = self.inner.suggested_fee_recipient { @@ -518,10 +519,10 @@ impl ExecutionLayer { parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - finalized_block_hash: ExecutionBlockHash, proposer_index: u64, - pubkey: Option, - slot: Slot, + forkchoice_update_params: ForkchoiceUpdateParameters, + builder_params: BuilderParams, + spec: &ChainSpec, ) -> Result { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; @@ -535,10 +536,10 @@ impl ExecutionLayer { parent_hash, timestamp, prev_randao, - finalized_block_hash, suggested_fee_recipient, - pubkey, - slot, + forkchoice_update_params, + builder_params, + spec, ) .await } @@ -551,8 +552,8 @@ impl ExecutionLayer { parent_hash, timestamp, prev_randao, - finalized_block_hash, suggested_fee_recipient, + forkchoice_update_params, ) .await } @@ -565,37 +566,143 @@ impl ExecutionLayer { parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - finalized_block_hash: ExecutionBlockHash, suggested_fee_recipient: Address, - pubkey_opt: Option, - slot: Slot, + forkchoice_update_params: ForkchoiceUpdateParameters, + builder_params: BuilderParams, + spec: &ChainSpec, ) -> Result { - //FIXME(sean) fallback logic included in PR #3134 + if let Some(builder) = self.builder() { + let slot = builder_params.slot; + let pubkey = builder_params.pubkey; - // Don't attempt to outsource payload construction until after the merge transition has been - // finalized. We want to be conservative with payload construction until then. 
- if let (Some(builder), Some(pubkey)) = (self.builder(), pubkey_opt) { - if finalized_block_hash != ExecutionBlockHash::zero() { - info!( - self.log(), - "Requesting blinded header from connected builder"; - "slot" => ?slot, - "pubkey" => ?pubkey, - "parent_hash" => ?parent_hash, - ); - return builder - .get_builder_header::(slot, parent_hash, &pubkey) - .await - .map(|d| d.data.message.header) - .map_err(Error::Builder); + match builder_params.chain_health { + ChainHealth::Healthy => { + info!( + self.log(), + "Requesting blinded header from connected builder"; + "slot" => ?slot, + "pubkey" => ?pubkey, + "parent_hash" => ?parent_hash, + ); + let (relay_result, local_result) = tokio::join!( + builder.get_builder_header::(slot, parent_hash, &pubkey), + self.get_full_payload_caching( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + ) + ); + + return match (relay_result, local_result) { + (Err(e), Ok(local)) => { + warn!( + self.log(), + "Unable to retrieve a payload from a connected \ + builder, falling back to the local execution client: {e:?}" + ); + Ok(local) + } + (Ok(None), Ok(local)) => { + warn!( + self.log(), + "No payload provided by connected builder. \ + Attempting to propose through local execution engine" + ); + Ok(local) + } + (Ok(Some(relay)), Ok(local)) => { + let is_signature_valid = relay.data.verify_signature(spec); + let header = relay.data.message.header; + + info!( + self.log(), + "Received a payload header from the connected builder"; + "block_hash" => ?header.block_hash(), + ); + + if header.parent_hash() != parent_hash { + warn!( + self.log(), + "Invalid parent hash from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if header.prev_randao() != prev_randao { + warn!( + self.log(), + "Invalid prev randao from connected builder, \ + falling back to local execution engine." 
+ ); + Ok(local) + } else if header.timestamp() != local.timestamp() { + warn!( + self.log(), + "Invalid timestamp from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if header.block_number() != local.block_number() { + warn!( + self.log(), + "Invalid block number from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if !matches!(relay.version, Some(ForkName::Merge)) { + // Once fork information is added to the payload, we will need to + // check that the local and relay payloads match. At this point, if + // we are requesting a payload at all, we have to assume this is + // the Bellatrix fork. + warn!( + self.log(), + "Invalid fork from connected builder, falling \ + back to local execution engine." + ); + Ok(local) + } else if !is_signature_valid { + let pubkey_bytes = relay.data.message.pubkey; + warn!(self.log(), "Invalid signature for pubkey {pubkey_bytes} on \ + bid from connected builder, falling back to local execution engine."); + Ok(local) + } else { + if header.fee_recipient() != suggested_fee_recipient { + info!( + self.log(), + "Fee recipient from connected builder does \ + not match, using it anyways." + ); + } + Ok(header) + } + } + (relay_result, Err(local_error)) => { + warn!(self.log(), "Failure from local execution engine. Attempting to \ + propose through connected builder"; "error" => ?local_error); + relay_result + .map_err(Error::Builder)? + .ok_or(Error::NoHeaderFromBuilder) + .map(|d| d.data.message.header) + } + }; + } + ChainHealth::Unhealthy(condition) => { + info!(self.log(), "Due to poor chain health the local execution engine will be used \ + for payload construction. To adjust chain health conditions \ + Use `builder-fallback` prefixed flags"; + "failed_condition" => ?condition) + } + // Intentional no-op, so we never attempt builder API proposals pre-merge. 
+ ChainHealth::PreMerge => (), } } - self.get_full_payload::( + self.get_full_payload_caching( parent_hash, timestamp, prev_randao, - finalized_block_hash, suggested_fee_recipient, + forkchoice_update_params, ) .await } @@ -606,27 +713,47 @@ impl ExecutionLayer { parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - finalized_block_hash: ExecutionBlockHash, suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, ) -> Result { self.get_full_payload_with( parent_hash, timestamp, prev_randao, - finalized_block_hash, suggested_fee_recipient, + forkchoice_update_params, noop, ) .await } + /// Get a full payload and cache its result in the execution layer's payload cache. + async fn get_full_payload_caching>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, + ) -> Result { + self.get_full_payload_with( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + Self::cache_payload, + ) + .await + } + async fn get_full_payload_with>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - finalized_block_hash: ExecutionBlockHash, suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, ) -> Result { debug!( @@ -637,8 +764,8 @@ impl ExecutionLayer { "timestamp" => timestamp, "parent_hash" => ?parent_hash, ); - self.engines() - .first_success(|engine| async move { + self.engine() + .request(|engine| async move { let payload_id = if let Some(id) = engine .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) .await @@ -650,20 +777,20 @@ impl ExecutionLayer { ); id } else { - // The payload id has *not* been cached for this engine. Trigger an artificial + // The payload id has *not* been cached. 
Trigger an artificial // fork choice update to retrieve a payload ID. - // - // TODO(merge): a better algorithm might try to favour a node that already had a - // cached payload id, since a payload that has had more time to produce is - // likely to be more profitable. metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, &[metrics::MISS], ); let fork_choice_state = ForkChoiceState { head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash, + safe_block_hash: forkchoice_update_params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), + finalized_block_hash: forkchoice_update_params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), }; let payload_attributes = PayloadAttributes { timestamp, @@ -671,37 +798,53 @@ impl ExecutionLayer { suggested_fee_recipient, }; - let response = engine - .notify_forkchoice_updated( - fork_choice_state, - Some(payload_attributes), - self.log(), - ) - .await?; + let response = engine + .notify_forkchoice_updated( + fork_choice_state, + Some(payload_attributes), + self.log(), + ) + .await?; - match response.payload_id { - Some(payload_id) => payload_id, - None => { - error!( - self.log(), - "Exec engine unable to produce payload"; - "msg" => "No payload ID, the engine is likely syncing. \ - This has the potential to cause a missed block \ - proposal.", - "status" => ?response.payload_status - ); - return Err(ApiError::PayloadIdUnavailable); - } - } - }; + match response.payload_id { + Some(payload_id) => payload_id, + None => { + error!( + self.log(), + "Exec engine unable to produce payload"; + "msg" => "No payload ID, the engine is likely syncing. 
\ + This has the potential to cause a missed block proposal.", + "status" => ?response.payload_status + ); + return Err(ApiError::PayloadIdUnavailable); + } + } + }; engine .api .get_payload_v1::(payload_id) .await .map(|full_payload| { + if full_payload.fee_recipient != suggested_fee_recipient { + error!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "fee_recipient" => ?full_payload.fee_recipient, + "suggested_fee_recipient" => ?suggested_fee_recipient, + ); + } if f(self, &full_payload).is_some() { - warn!(self.log(), "Duplicate payload cached, this might indicate redundant proposal attempts."); + warn!( + self.log(), + "Duplicate payload cached, this might indicate redundant proposal \ + attempts." 
+ ); } full_payload.into() }) @@ -741,12 +884,12 @@ impl ExecutionLayer { "block_number" => execution_payload.block_number, ); - let broadcast_result = self - .engines() - .broadcast(|engine| engine.api.new_payload_v1(execution_payload.clone())) + let result = self + .engine() + .request(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; - process_payload_status(execution_payload.block_hash, broadcast_result, self.log()) + process_payload_status(execution_payload.block_hash, result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) } @@ -825,6 +968,7 @@ impl ExecutionLayer { pub async fn notify_forkchoice_updated( &self, head_block_hash: ExecutionBlockHash, + justified_block_hash: ExecutionBlockHash, finalized_block_hash: ExecutionBlockHash, current_slot: Slot, head_block_root: Hash256, @@ -838,6 +982,7 @@ impl ExecutionLayer { self.log(), "Issuing engine_forkchoiceUpdated"; "finalized_block_hash" => ?finalized_block_hash, + "justified_block_hash" => ?justified_block_hash, "head_block_hash" => ?head_block_hash, ); @@ -864,21 +1009,19 @@ impl ExecutionLayer { } } - // see https://hackmd.io/@n0ble/kintsugi-spec#Engine-API - // for now, we must set safe_block_hash = head_block_hash let forkchoice_state = ForkChoiceState { head_block_hash, - safe_block_hash: head_block_hash, + safe_block_hash: justified_block_hash, finalized_block_hash, }; - self.engines() + self.engine() .set_latest_forkchoice_state(forkchoice_state) .await; - let broadcast_result = self - .engines() - .broadcast(|engine| async move { + let result = self + .engine() + .request(|engine| async move { engine .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) .await @@ -887,7 +1030,7 @@ impl ExecutionLayer { process_payload_status( head_block_hash, - broadcast_result.map(|response| response.payload_status), + result.map(|response| response.payload_status), self.log(), ) .map_err(Box::new) @@ -901,12 +1044,12 @@ impl ExecutionLayer { 
terminal_block_number: 0, }; - let broadcast_result = self - .engines() - .broadcast(|engine| engine.api.exchange_transition_configuration_v1(local)) + let result = self + .engine() + .request(|engine| engine.api.exchange_transition_configuration_v1(local)) .await; - match broadcast_result { + match result { Ok(remote) => { if local.terminal_total_difficulty != remote.terminal_total_difficulty || local.terminal_block_hash != remote.terminal_block_hash @@ -951,6 +1094,7 @@ impl ExecutionLayer { pub async fn get_terminal_pow_block_hash( &self, spec: &ChainSpec, + timestamp: u64, ) -> Result, Error> { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, @@ -958,8 +1102,8 @@ impl ExecutionLayer { ); let hash_opt = self - .engines() - .first_success(|engine| async move { + .engine() + .request(|engine| async move { let terminal_block_hash = spec.terminal_block_hash; if terminal_block_hash != ExecutionBlockHash::zero() { if self @@ -973,8 +1117,19 @@ impl ExecutionLayer { } } - self.get_pow_block_hash_at_total_difficulty(engine, spec) - .await + let block = self.get_pow_block_at_total_difficulty(engine, spec).await?; + if let Some(pow_block) = block { + // If `terminal_block.timestamp == transition_block.timestamp`, + // we violate the invariant that a block's timestamp must be + // strictly greater than its parent's timestamp. + // The execution layer will reject a fcu call with such payload + // attributes leading to a missed block. + // Hence, we return `None` in such a case. 
+ if pow_block.timestamp >= timestamp { + return Ok(None); + } + } + Ok(block.map(|b| b.block_hash)) }) .await .map_err(Box::new) @@ -1002,11 +1157,11 @@ impl ExecutionLayer { /// `get_pow_block_at_terminal_total_difficulty` /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md - async fn get_pow_block_hash_at_total_difficulty( + async fn get_pow_block_at_total_difficulty( &self, - engine: &Engine, + engine: &Engine, spec: &ChainSpec, - ) -> Result, ApiError> { + ) -> Result, ApiError> { let mut block = engine .api .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) @@ -1019,7 +1174,7 @@ impl ExecutionLayer { let block_reached_ttd = block.total_difficulty >= spec.terminal_total_difficulty; if block_reached_ttd { if block.parent_hash == ExecutionBlockHash::zero() { - return Ok(Some(block.block_hash)); + return Ok(Some(block)); } let parent = self .get_pow_block(engine, block.parent_hash) @@ -1028,7 +1183,7 @@ impl ExecutionLayer { let parent_reached_ttd = parent.total_difficulty >= spec.terminal_total_difficulty; if block_reached_ttd && !parent_reached_ttd { - return Ok(Some(block.block_hash)); + return Ok(Some(block)); } else { block = parent; } @@ -1045,8 +1200,8 @@ impl ExecutionLayer { /// - `Some(true)` if the given `block_hash` is the terminal proof-of-work block. /// - `Some(false)` if the given `block_hash` is certainly *not* the terminal proof-of-work /// block. - /// - `None` if the `block_hash` or its parent were not present on the execution engines. - /// - `Err(_)` if there was an error connecting to the execution engines. + /// - `None` if the `block_hash` or its parent were not present on the execution engine. + /// - `Err(_)` if there was an error connecting to the execution engine. 
/// /// ## Fallback Behaviour /// @@ -1074,8 +1229,8 @@ impl ExecutionLayer { &[metrics::IS_VALID_TERMINAL_POW_BLOCK_HASH], ); - self.engines() - .broadcast(|engine| async move { + self.engine() + .request(|engine| async move { if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? { if let Some(pow_parent) = self.get_pow_block(engine, pow_block.parent_hash).await? @@ -1118,7 +1273,7 @@ impl ExecutionLayer { /// https://github.com/ethereum/consensus-specs/issues/2636 async fn get_pow_block( &self, - engine: &Engine, + engine: &Engine, hash: ExecutionBlockHash, ) -> Result, ApiError> { if let Some(cached) = self.execution_blocks().await.get(&hash).copied() { @@ -1141,8 +1296,8 @@ impl ExecutionLayer { &self, hash: ExecutionBlockHash, ) -> Result>, Error> { - self.engines() - .first_success(|engine| async move { + self.engine() + .request(|engine| async move { self.get_payload_by_block_hash_from_engine(engine, hash) .await }) @@ -1153,7 +1308,7 @@ impl ExecutionLayer { async fn get_payload_by_block_hash_from_engine( &self, - engine: &Engine, + engine: &Engine, hash: ExecutionBlockHash, ) -> Result>, ApiError> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); @@ -1245,27 +1400,62 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_block_prior_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; - assert_eq!(el.get_terminal_pow_block_hash(&spec).await.unwrap(), None) + el.engine().upcheck().await; + assert_eq!( + el.get_terminal_pow_block_hash(&spec, timestamp_now()) + .await + .unwrap(), + None + ) }) .await .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { assert_eq!( - el.get_terminal_pow_block_hash(&spec).await.unwrap(), + el.get_terminal_pow_block_hash(&spec, timestamp_now()) + .await + .unwrap(), Some(terminal_block.unwrap().block_hash) ) }) .await; } + 
#[tokio::test] + async fn rejects_terminal_block_with_equal_timestamp() { + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) + .move_to_block_prior_to_terminal_block() + .with_terminal_block(|spec, el, _| async move { + el.engine().upcheck().await; + assert_eq!( + el.get_terminal_pow_block_hash(&spec, timestamp_now()) + .await + .unwrap(), + None + ) + }) + .await + .move_to_terminal_block() + .with_terminal_block(|spec, el, terminal_block| async move { + let timestamp = terminal_block.as_ref().map(|b| b.timestamp).unwrap(); + assert_eq!( + el.get_terminal_pow_block_hash(&spec, timestamp) + .await + .unwrap(), + None + ) + }) + .await; + } + #[tokio::test] async fn verifies_valid_terminal_block_hash() { let runtime = TestRuntime::default(); MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; assert_eq!( el.is_valid_terminal_pow_block_hash(terminal_block.unwrap().block_hash, &spec) .await @@ -1282,7 +1472,7 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; let invalid_terminal_block = terminal_block.unwrap().parent_hash; assert_eq!( @@ -1301,7 +1491,7 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; let missing_terminal_block = ExecutionBlockHash::repeat_byte(42); assert_eq!( @@ -1318,3 +1508,12 @@ mod test { fn noop(_: &ExecutionLayer, _: &ExecutionPayload) -> Option> { None } + +#[cfg(test)] +/// Returns the duration since 
the unix epoch. +fn timestamp_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) + .as_secs() +} diff --git a/beacon_node/execution_layer/src/payload_cache.rs b/beacon_node/execution_layer/src/payload_cache.rs new file mode 100644 index 0000000000..60a8f2a95c --- /dev/null +++ b/beacon_node/execution_layer/src/payload_cache.rs @@ -0,0 +1,33 @@ +use lru::LruCache; +use parking_lot::Mutex; +use tree_hash::TreeHash; +use types::{EthSpec, ExecutionPayload, Hash256}; + +pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10; + +/// A cache mapping execution payloads by tree hash roots. +pub struct PayloadCache { + payloads: Mutex>>, +} + +#[derive(Hash, PartialEq, Eq)] +struct PayloadCacheId(Hash256); + +impl Default for PayloadCache { + fn default() -> Self { + PayloadCache { + payloads: Mutex::new(LruCache::new(DEFAULT_PAYLOAD_CACHE_SIZE)), + } + } +} + +impl PayloadCache { + pub fn put(&self, payload: ExecutionPayload) -> Option> { + let root = payload.tree_hash_root(); + self.payloads.lock().put(PayloadCacheId(root), payload) + } + + pub fn pop(&self, root: &Hash256) -> Option> { + self.payloads.lock().pop(&PayloadCacheId(*root)) + } +} diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index b61092cf0e..3620a02dfb 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,10 +1,13 @@ -use crate::engine_api::{ - json_structures::{ - JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, - }, - ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, -}; use crate::engines::ForkChoiceState; +use crate::{ + engine_api::{ + json_structures::{ + JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, + }, + ExecutionBlock, 
PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, + }, + ExecutionBlockWithTransactions, +}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use tree_hash::TreeHash; @@ -57,15 +60,39 @@ impl Block { block_number: block.block_number, parent_hash: block.parent_hash, total_difficulty: block.total_difficulty, + timestamp: block.timestamp, }, Block::PoS(payload) => ExecutionBlock { block_hash: payload.block_hash, block_number: payload.block_number, parent_hash: payload.parent_hash, total_difficulty, + timestamp: payload.timestamp, }, } } + + pub fn as_execution_block_with_tx(&self) -> Option> { + match self { + Block::PoS(payload) => Some(ExecutionBlockWithTransactions { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: vec![], + }), + Block::PoW(_) => None, + } + } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize, TreeHash)] @@ -75,8 +102,10 @@ pub struct PoWBlock { pub block_hash: ExecutionBlockHash, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, + pub timestamp: u64, } +#[derive(Clone)] pub struct ExecutionBlockGenerator { /* * Common database @@ -153,6 +182,14 @@ impl ExecutionBlockGenerator { .map(|block| block.as_execution_block(self.terminal_total_difficulty)) } + pub fn execution_block_with_txs_by_hash( + &self, + hash: ExecutionBlockHash, + ) -> Option> { + self.block_by_hash(hash) + .and_then(|block| block.as_execution_block_with_tx()) + } + pub fn move_to_block_prior_to_terminal_block(&mut self) -> Result<(), String> { let target_block = self 
.terminal_block_number @@ -233,6 +270,26 @@ impl ExecutionBlockGenerator { Ok(()) } + pub fn modify_last_block(&mut self, block_modifier: impl FnOnce(&mut Block)) { + if let Some((last_block_hash, block_number)) = + self.block_hashes.keys().max().and_then(|block_number| { + self.block_hashes + .get(block_number) + .map(|block| (block, *block_number)) + }) + { + let mut block = self.blocks.remove(last_block_hash).unwrap(); + block_modifier(&mut block); + // Update the block hash after modifying the block + match &mut block { + Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), + Block::PoS(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), + } + self.block_hashes.insert(block_number, block.block_hash()); + self.blocks.insert(block.block_hash(), block); + } + } + pub fn get_payload(&mut self, id: &PayloadId) -> Option> { self.payload_ids.get(id).cloned() } @@ -279,7 +336,9 @@ impl ExecutionBlockGenerator { } let unknown_head_block_hash = !self.blocks.contains_key(&forkchoice_state.head_block_hash); - let unknown_safe_block_hash = !self.blocks.contains_key(&forkchoice_state.safe_block_hash); + let unknown_safe_block_hash = forkchoice_state.safe_block_hash + != ExecutionBlockHash::zero() + && !self.blocks.contains_key(&forkchoice_state.safe_block_hash); let unknown_finalized_block_hash = forkchoice_state.finalized_block_hash != ExecutionBlockHash::zero() && !self @@ -390,6 +449,7 @@ pub fn generate_pow_block( block_hash: ExecutionBlockHash::zero(), parent_hash, total_difficulty, + timestamp: block_number, }; block.block_hash = ExecutionBlockHash::from_root(block.tree_hash_root()); diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 772ac3c866..975f09fa5e 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -49,12 +49,30 @@ pub async fn handle_rpc( 
.map_err(|e| format!("unable to parse hash: {:?}", e)) })?; - Ok(serde_json::to_value( - ctx.execution_block_generator - .read() - .execution_block_by_hash(hash), - ) - .unwrap()) + // If we have a static response set, just return that. + if let Some(response) = *ctx.static_get_block_by_hash_response.lock() { + return Ok(serde_json::to_value(response).unwrap()); + } + + let full_tx = params + .get(1) + .and_then(JsonValue::as_bool) + .ok_or_else(|| "missing/invalid params[1] value".to_string())?; + if full_tx { + Ok(serde_json::to_value( + ctx.execution_block_generator + .read() + .execution_block_with_txs_by_hash(hash), + ) + .unwrap()) + } else { + Ok(serde_json::to_value( + ctx.execution_block_generator + .read() + .execution_block_by_hash(hash), + ) + .unwrap()) + } } ENGINE_NEW_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; @@ -120,6 +138,15 @@ pub async fn handle_rpc( Ok(serde_json::to_value(response).unwrap()) } + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => { + let block_generator = ctx.execution_block_generator.read(); + let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 { + terminal_total_difficulty: block_generator.terminal_total_difficulty, + terminal_block_hash: block_generator.terminal_block_hash, + terminal_block_number: block_generator.terminal_block_number, + }; + Ok(serde_json::to_value(transition_config).unwrap()) + } other => Err(format!( "The method {} does not exist/is not available", other diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs new file mode 100644 index 0000000000..6b565cb3d8 --- /dev/null +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -0,0 +1,383 @@ +use crate::test_utils::DEFAULT_JWT_SECRET; +use crate::{Config, ExecutionLayer, PayloadAttributes}; +use async_trait::async_trait; +use eth2::types::{BlockId, StateId, ValidatorId}; +use eth2::{BeaconNodeHttpClient, 
Timeouts}; +use ethereum_consensus::crypto::{SecretKey, Signature}; +use ethereum_consensus::primitives::BlsPublicKey; +pub use ethereum_consensus::state_transition::Context; +use fork_choice::ForkchoiceUpdateParameters; +use mev_build_rs::{ + sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, + BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, + ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid, + SignedValidatorRegistration, +}; +use parking_lot::RwLock; +use sensitive_url::SensitiveUrl; +use ssz::{Decode, Encode}; +use ssz_rs::{Merkleized, SimpleSerialize}; +use std::collections::HashMap; +use std::fmt::Debug; +use std::net::Ipv4Addr; +use std::sync::Arc; +use std::time::Duration; +use task_executor::TaskExecutor; +use tempfile::NamedTempFile; +use tree_hash::TreeHash; +use types::{ + Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, Hash256, Slot, Uint256, +}; + +#[derive(Clone)] +pub enum Operation { + FeeRecipient(Address), + GasLimit(usize), + Value(usize), + ParentHash(Hash256), + PrevRandao(Hash256), + BlockNumber(usize), + Timestamp(usize), +} + +impl Operation { + fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + match self { + Operation::FeeRecipient(fee_recipient) => { + bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? 
+ } + Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, + Operation::Value(value) => bid.value = to_ssz_rs(&Uint256::from(value))?, + Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, + Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, + Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, + Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64, + } + Ok(()) + } +} + +pub struct TestingBuilder { + server: BlindedBlockProviderServer>, + pub builder: MockBuilder, +} + +impl TestingBuilder { + pub fn new( + mock_el_url: SensitiveUrl, + builder_url: SensitiveUrl, + beacon_url: SensitiveUrl, + spec: ChainSpec, + executor: TaskExecutor, + ) -> Self { + let file = NamedTempFile::new().unwrap(); + let path = file.path().into(); + std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); + + // This EL should not talk to a builder + let config = Config { + execution_endpoints: vec![mock_el_url], + secret_files: vec![path], + suggested_fee_recipient: None, + ..Default::default() + }; + + let el = + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); + + // This should probably be done for all fields, we only update ones we are testing with so far. 
+ let mut context = Context::for_mainnet(); + context.terminal_total_difficulty = to_ssz_rs(&spec.terminal_total_difficulty).unwrap(); + context.terminal_block_hash = to_ssz_rs(&spec.terminal_block_hash).unwrap(); + context.terminal_block_hash_activation_epoch = + to_ssz_rs(&spec.terminal_block_hash_activation_epoch).unwrap(); + + let builder = MockBuilder::new( + el, + BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), + spec, + context, + ); + let port = builder_url.full.port().unwrap(); + let host: Ipv4Addr = builder_url + .full + .host_str() + .unwrap() + .to_string() + .parse() + .unwrap(); + let server = BlindedBlockProviderServer::new(host, port, builder.clone()); + Self { server, builder } + } + + pub async fn run(&self) { + self.server.run().await + } +} + +#[derive(Clone)] +pub struct MockBuilder { + el: ExecutionLayer, + beacon_client: BeaconNodeHttpClient, + spec: ChainSpec, + context: Arc, + val_registration_cache: Arc>>, + builder_sk: SecretKey, + operations: Arc>>, + invalidate_signatures: Arc>, +} + +impl MockBuilder { + pub fn new( + el: ExecutionLayer, + beacon_client: BeaconNodeHttpClient, + spec: ChainSpec, + context: Context, + ) -> Self { + let sk = SecretKey::random(&mut rand::thread_rng()).unwrap(); + Self { + el, + beacon_client, + // Should keep spec and context consistent somehow + spec, + context: Arc::new(context), + val_registration_cache: Arc::new(RwLock::new(HashMap::new())), + builder_sk: sk, + operations: Arc::new(RwLock::new(vec![])), + invalidate_signatures: Arc::new(RwLock::new(false)), + } + } + + pub fn add_operation(&self, op: Operation) { + self.operations.write().push(op); + } + + pub fn invalid_signatures(&self) { + *self.invalidate_signatures.write() = true; + } + + pub fn valid_signatures(&mut self) { + *self.invalidate_signatures.write() = false; + } + + fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + let mut guard = self.operations.write(); + 
while let Some(op) = guard.pop() { + op.apply(bid)?; + } + Ok(()) + } +} + +#[async_trait] +impl mev_build_rs::BlindedBlockProvider for MockBuilder { + async fn register_validators( + &self, + registrations: &mut [SignedValidatorRegistration], + ) -> Result<(), BlindedBlockProviderError> { + for registration in registrations { + let pubkey = registration.message.public_key.clone(); + let message = &mut registration.message; + verify_signed_builder_message( + message, + ®istration.signature, + &pubkey, + &self.context, + )?; + self.val_registration_cache.write().insert( + registration.message.public_key.clone(), + registration.clone(), + ); + } + + Ok(()) + } + + async fn fetch_best_bid( + &self, + bid_request: &BidRequest, + ) -> Result { + let slot = Slot::new(bid_request.slot); + let signed_cached_data = self + .val_registration_cache + .read() + .get(&bid_request.public_key) + .ok_or_else(|| convert_err("missing registration"))? + .clone(); + let cached_data = signed_cached_data.message; + + let head = self + .beacon_client + .get_beacon_blocks::(BlockId::Head) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing head block"))?; + + let block = head.data.message_merge().map_err(convert_err)?; + let head_block_root = block.tree_hash_root(); + let head_execution_hash = block.body.execution_payload.execution_payload.block_hash; + if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { + return Err(BlindedBlockProviderError::Custom(format!( + "head mismatch: {} {}", + head_execution_hash, bid_request.parent_hash + ))); + } + + let finalized_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Finalized) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing finalized block"))? + .data + .message_merge() + .map_err(convert_err)? 
+ .body + .execution_payload + .execution_payload + .block_hash; + + let justified_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Justified) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing finalized block"))? + .data + .message_merge() + .map_err(convert_err)? + .body + .execution_payload + .execution_payload + .block_hash; + + let val_index = self + .beacon_client + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(from_ssz_rs(&cached_data.public_key)?), + ) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing validator from state"))? + .data + .index; + let fee_recipient = from_ssz_rs(&cached_data.fee_recipient)?; + let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); + + let genesis_time = self + .beacon_client + .get_beacon_genesis() + .await + .map_err(convert_err)? + .data + .genesis_time; + let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; + + let head_state: BeaconState = self + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(convert_err)? + .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? + .data; + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(convert_err)?; + + let payload_attributes = PayloadAttributes { + timestamp, + prev_randao: *prev_randao, + suggested_fee_recipient: fee_recipient, + }; + + self.el + .insert_proposer(slot, head_block_root, val_index, payload_attributes) + .await; + + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: Hash256::zero(), + head_hash: None, + justified_hash: Some(justified_execution_hash), + finalized_hash: Some(finalized_execution_hash), + }; + + let payload = self + .el + .get_full_payload_caching::>( + head_execution_hash, + timestamp, + *prev_randao, + fee_recipient, + forkchoice_update_params, + ) + .await + .map_err(convert_err)? 
+ .to_execution_payload_header(); + + let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; + let mut header: ServerPayloadHeader = + serde_json::from_str(json_payload.as_str()).map_err(convert_err)?; + + header.gas_limit = cached_data.gas_limit; + + let mut message = BuilderBid { + header, + value: ssz_rs::U256::default(), + public_key: self.builder_sk.public_key(), + }; + + self.apply_operations(&mut message)?; + + let mut signature = + sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?; + + if *self.invalidate_signatures.read() { + signature = Signature::default(); + } + + let signed_bid = SignedBuilderBid { message, signature }; + Ok(signed_bid) + } + + async fn open_bid( + &self, + signed_block: &mut SignedBlindedBeaconBlock, + ) -> Result { + let payload = self + .el + .get_payload_by_root(&from_ssz_rs( + &signed_block + .message + .body + .execution_payload_header + .hash_tree_root() + .map_err(convert_err)?, + )?) + .ok_or_else(|| convert_err("missing payload for tx root"))?; + + let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; + serde_json::from_str(json_payload.as_str()).map_err(convert_err) + } +} + +pub fn from_ssz_rs( + ssz_rs_data: &T, +) -> Result { + U::from_ssz_bytes( + ssz_rs::serialize(ssz_rs_data) + .map_err(convert_err)? 
+ .as_ref(), + ) + .map_err(convert_err) +} + +pub fn to_ssz_rs( + ssz_data: &T, +) -> Result { + ssz_rs::deserialize::(&ssz_data.as_ssz_bytes()).map_err(convert_err) +} + +fn convert_err(e: E) -> BlindedBlockProviderError { + BlindedBlockProviderError::Custom(format!("{e:?}")) +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 707a7c0c3e..cab2367cd0 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,10 +1,13 @@ use crate::{ - test_utils::{MockServer, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, JWT_SECRET}, + test_utils::{ + MockServer, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, + }, Config, *, }; use sensitive_url::SensitiveUrl; use task_executor::TaskExecutor; use tempfile::NamedTempFile; +use tree_hash::TreeHash; use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; pub struct MockExecutionLayer { @@ -22,6 +25,7 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), Epoch::new(0), + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), None, ) } @@ -32,6 +36,7 @@ impl MockExecutionLayer { terminal_block: u64, terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, + jwt_key: Option, builder_url: Option, ) -> Self { let handle = executor.handle().unwrap(); @@ -41,8 +46,10 @@ impl MockExecutionLayer { spec.terminal_block_hash = terminal_block_hash; spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch; + let jwt_key = jwt_key.unwrap_or_else(JwtKey::random); let server = MockServer::new( &handle, + jwt_key, terminal_total_difficulty, terminal_block, terminal_block_hash, @@ -52,7 +59,7 @@ impl MockExecutionLayer { let file = NamedTempFile::new().unwrap(); let path = file.path().into(); - 
std::fs::write(&path, hex::encode(JWT_SECRET)).unwrap(); + std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); let config = Config { execution_endpoints: vec![url], @@ -82,11 +89,16 @@ impl MockExecutionLayer { let block_number = latest_execution_block.block_number() + 1; let timestamp = block_number; let prev_randao = Hash256::from_low_u64_be(block_number); - let finalized_block_hash = parent_hash; + let head_block_root = Hash256::repeat_byte(42); + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: head_block_root, + head_hash: Some(parent_hash), + justified_hash: None, + finalized_hash: None, + }; // Insert a proposer to ensure the fork choice updated command works. let slot = Slot::new(0); - let head_block_root = Hash256::repeat_byte(42); let validator_index = 0; self.el .insert_proposer( @@ -105,6 +117,7 @@ impl MockExecutionLayer { .notify_forkchoice_updated( parent_hash, ExecutionBlockHash::zero(), + ExecutionBlockHash::zero(), slot, head_block_root, ) @@ -112,16 +125,21 @@ impl MockExecutionLayer { .unwrap(); let validator_index = 0; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot, + chain_health: ChainHealth::Healthy, + }; let payload = self .el .get_payload::>( parent_hash, timestamp, prev_randao, - finalized_block_hash, validator_index, - None, - slot, + forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() @@ -132,6 +150,43 @@ impl MockExecutionLayer { assert_eq!(payload.timestamp, timestamp); assert_eq!(payload.prev_randao, prev_randao); + // Ensure the payload cache is empty. 
+ assert!(self + .el + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot, + chain_health: ChainHealth::Healthy, + }; + let payload_header = self + .el + .get_payload::>( + parent_hash, + timestamp, + prev_randao, + validator_index, + forkchoice_update_params, + builder_params, + &self.spec, + ) + .await + .unwrap() + .execution_payload_header; + assert_eq!(payload_header.block_hash, block_hash); + assert_eq!(payload_header.parent_hash, parent_hash); + assert_eq!(payload_header.block_number, block_number); + assert_eq!(payload_header.timestamp, timestamp); + assert_eq!(payload_header.prev_randao, prev_randao); + + // Ensure the payload cache has the correct payload. + assert_eq!( + self.el + .get_payload_by_root(&payload_header.tree_hash_root()), + Some(payload.clone()) + ); + let status = self.el.notify_new_payload(&payload).await.unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -142,6 +197,7 @@ impl MockExecutionLayer { .notify_forkchoice_updated( block_hash, ExecutionBlockHash::zero(), + ExecutionBlockHash::zero(), slot, head_block_root, ) diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 805f6716fb..462e34e910 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -2,11 +2,11 @@ use crate::engine_api::auth::JwtKey; use crate::engine_api::{ - auth::Auth, http::JSONRPC_VERSION, PayloadStatusV1, PayloadStatusV1Status, + auth::Auth, http::JSONRPC_VERSION, ExecutionBlock, PayloadStatusV1, PayloadStatusV1Status, }; use bytes::Bytes; use environment::null_logger; -use execution_block_generator::{Block, PoWBlock}; +use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; @@ -21,17 +21,40 @@ use tokio::{runtime, sync::oneshot}; use 
types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; -pub use execution_block_generator::{generate_pow_block, ExecutionBlockGenerator}; +pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; +pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; -pub const JWT_SECRET: [u8; 32] = [42; 32]; +pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; mod execution_block_generator; mod handle_rpc; +mod mock_builder; mod mock_execution_layer; +/// Configuration for the MockExecutionLayer. +pub struct MockExecutionConfig { + pub server_config: Config, + pub jwt_key: JwtKey, + pub terminal_difficulty: Uint256, + pub terminal_block: u64, + pub terminal_block_hash: ExecutionBlockHash, +} + +impl Default for MockExecutionConfig { + fn default() -> Self { + Self { + jwt_key: JwtKey::random(), + terminal_difficulty: DEFAULT_TERMINAL_DIFFICULTY.into(), + terminal_block: DEFAULT_TERMINAL_BLOCK, + terminal_block_hash: ExecutionBlockHash::zero(), + server_config: Config::default(), + } + } +} + pub struct MockServer { _shutdown_tx: oneshot::Sender<()>, listen_socket_addr: SocketAddr, @@ -43,25 +66,29 @@ impl MockServer { pub fn unit_testing() -> Self { Self::new( &runtime::Handle::current(), + JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), ) } - pub fn new( - handle: &runtime::Handle, - terminal_difficulty: Uint256, - terminal_block: u64, - terminal_block_hash: ExecutionBlockHash, - ) -> Self { + pub fn new_with_config(handle: &runtime::Handle, config: MockExecutionConfig) -> Self { + let MockExecutionConfig { + jwt_key, + terminal_difficulty, + terminal_block, + terminal_block_hash, + server_config, + } = config; let last_echo_request = 
Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); let execution_block_generator = ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash); let ctx: Arc> = Arc::new(Context { - config: <_>::default(), + config: server_config, + jwt_key, log: null_logger().unwrap(), last_echo_request: last_echo_request.clone(), execution_block_generator: RwLock::new(execution_block_generator), @@ -69,6 +96,7 @@ impl MockServer { preloaded_responses, static_new_payload_response: <_>::default(), static_forkchoice_updated_response: <_>::default(), + static_get_block_by_hash_response: <_>::default(), _phantom: PhantomData, }); @@ -99,6 +127,25 @@ impl MockServer { } } + pub fn new( + handle: &runtime::Handle, + jwt_key: JwtKey, + terminal_difficulty: Uint256, + terminal_block: u64, + terminal_block_hash: ExecutionBlockHash, + ) -> Self { + Self::new_with_config( + handle, + MockExecutionConfig { + server_config: Config::default(), + jwt_key, + terminal_difficulty, + terminal_block, + terminal_block_hash, + }, + ) + } + pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator> { self.ctx.execution_block_generator.write() } @@ -271,6 +318,16 @@ impl MockServer { self.set_forkchoice_updated_response(Self::invalid_terminal_block_status()); } + /// This will make the node appear like it is syncing. + pub fn all_get_block_by_hash_requests_return_none(&self) { + *self.ctx.static_get_block_by_hash_response.lock() = Some(None); + } + + /// The node will respond "naturally"; it will return blocks if they're known to it. + pub fn all_get_block_by_hash_requests_return_natural_value(&self) { + *self.ctx.static_get_block_by_hash_response.lock() = None; + } + /// Disables any static payload responses so the execution block generator will do its own /// verification. 
pub fn full_payload_verification(&self) { @@ -290,6 +347,7 @@ impl MockServer { block_hash, parent_hash, total_difficulty, + timestamp: block_number, }); self.ctx @@ -351,6 +409,7 @@ impl warp::reject::Reject for AuthError {} /// The server will gracefully handle the case where any fields are `None`. pub struct Context { pub config: Config, + pub jwt_key: JwtKey, pub log: Logger, pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, @@ -358,6 +417,7 @@ pub struct Context { pub previous_request: Arc>>, pub static_new_payload_response: Arc>>, pub static_forkchoice_updated_response: Arc>>, + pub static_get_block_by_hash_response: Arc>>>, pub _phantom: PhantomData, } @@ -386,28 +446,30 @@ struct ErrorMessage { /// Returns a `warp` header which filters out request that has a missing or incorrectly /// signed JWT token. -fn auth_header_filter() -> warp::filters::BoxedFilter<()> { +fn auth_header_filter(jwt_key: JwtKey) -> warp::filters::BoxedFilter<()> { warp::any() .and(warp::filters::header::optional("Authorization")) - .and_then(move |authorization: Option| async move { - match authorization { - None => Err(warp::reject::custom(AuthError( - "auth absent from request".to_string(), - ))), - Some(auth) => { - if let Some(token) = auth.strip_prefix("Bearer ") { - let secret = JwtKey::from_slice(&JWT_SECRET).unwrap(); - match Auth::validate_token(token, &secret) { - Ok(_) => Ok(()), - Err(e) => Err(warp::reject::custom(AuthError(format!( - "Auth failure: {:?}", - e - )))), + .and_then(move |authorization: Option| { + let secret = jwt_key.clone(); + async move { + match authorization { + None => Err(warp::reject::custom(AuthError( + "auth absent from request".to_string(), + ))), + Some(auth) => { + if let Some(token) = auth.strip_prefix("Bearer ") { + match Auth::validate_token(token, &secret) { + Ok(_) => Ok(()), + Err(e) => Err(warp::reject::custom(AuthError(format!( + "Auth failure: {:?}", + e + )))), + } + } else { + Err(warp::reject::custom(AuthError( + 
"Bearer token not present in auth header".to_string(), + ))) } - } else { - Err(warp::reject::custom(AuthError( - "Bearer token not present in auth header".to_string(), - ))) } } } @@ -523,7 +585,7 @@ pub fn serve( }); let routes = warp::post() - .and(auth_header_filter()) + .and(auth_header_filter(ctx.jwt_key.clone())) .and(root.or(echo)) .recover(handle_rejection) // Add a `Server` header. diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 07fb992393..fedd66c540 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -32,14 +32,16 @@ parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" +tree_hash = "0.4.1" [dev-dependencies] store = { path = "../store" } environment = { path = "../../lighthouse/environment" } -tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } logging = { path = "../../common/logging" } serde_json = "1.0.58" +proto_array = { path = "../../consensus/proto_array" } +unused_port = {path = "../../common/unused_port"} [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 35a35bcb74..9febae5b19 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -60,11 +60,17 @@ fn cached_attestation_duties( ) -> Result { let head_block_root = chain.canonical_head.cached_head().head_block_root(); - let (duties, dependent_root, _execution_status) = chain + let (duties, dependent_root, execution_status) = chain .validator_attestation_duties(request_indices, request_epoch, head_block_root) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(duties, request_indices, dependent_root, chain) + convert_to_api_response( + duties, + request_indices, + dependent_root, + execution_status.is_optimistic_or_invalid(), + chain, + ) } /// 
Compute some attester duties by reading a `BeaconState` from disk, completely ignoring the @@ -76,35 +82,42 @@ fn compute_historic_attester_duties( ) -> Result { // If the head is quite old then it might still be relevant for a historical request. // - // Use the `with_head` function to read & clone in a single call to avoid race conditions. - let state_opt = chain - .with_head(|head| { - if head.beacon_state.current_epoch() <= request_epoch { - Ok(Some(( - head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), - ))) - } else { - Ok(None) - } - }) - .map_err(warp_utils::reject::beacon_chain_error)?; + // Avoid holding the `cached_head` longer than necessary. + let state_opt = { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + let head = &cached_head.snapshot; - let mut state = if let Some((state_root, mut state)) = state_opt { - // If we've loaded the head state it might be from a previous epoch, ensure it's in a - // suitable epoch. - ensure_state_knows_attester_duties_for_epoch( - &mut state, - state_root, - request_epoch, - &chain.spec, - )?; - state - } else { - StateId::slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + if head.beacon_state.current_epoch() <= request_epoch { + Some(( + head.beacon_state_root(), + head.beacon_state + .clone_with(CloneConfig::committee_caches_only()), + execution_status.is_optimistic_or_invalid(), + )) + } else { + None + } }; + let (mut state, execution_optimistic) = + if let Some((state_root, mut state, execution_optimistic)) = state_opt { + // If we've loaded the head state it might be from a previous epoch, ensure it's in a + // suitable epoch. 
+ ensure_state_knows_attester_duties_for_epoch( + &mut state, + state_root, + request_epoch, + &chain.spec, + )?; + (state, execution_optimistic) + } else { + StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(chain)? + }; + // Sanity-check the state lookup. if !(state.current_epoch() == request_epoch || state.current_epoch() + 1 == request_epoch) { return Err(warp_utils::reject::custom_server_error(format!( @@ -140,7 +153,13 @@ fn compute_historic_attester_duties( .collect::>() .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(duties, request_indices, dependent_root, chain) + convert_to_api_response( + duties, + request_indices, + dependent_root, + execution_optimistic, + chain, + ) } fn ensure_state_knows_attester_duties_for_epoch( @@ -178,6 +197,7 @@ fn convert_to_api_response( duties: Vec>, indices: &[u64], dependent_root: Hash256, + execution_optimistic: bool, chain: &BeaconChain, ) -> Result { // Protect against an inconsistent slot clock. @@ -213,6 +233,7 @@ fn convert_to_api_response( Ok(api_types::DutiesResponse { dependent_root, + execution_optimistic: Some(execution_optimistic), data, }) } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 73f50985bd..e418849040 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,8 +1,10 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes, WhenSlotSkipped}; +use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlockId as CoreBlockId; +use std::fmt; use std::str::FromStr; use std::sync::Arc; -use types::{BlindedPayload, Hash256, SignedBeaconBlock, Slot}; +use types::{Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. 
@@ -22,32 +24,78 @@ impl BlockId { pub fn root( &self, chain: &BeaconChain, - ) -> Result { + ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { match &self.0 { - CoreBlockId::Head => Ok(chain.canonical_head.cached_head().head_block_root()), - CoreBlockId::Genesis => Ok(chain.genesis_block_root), - CoreBlockId::Finalized => Ok(chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .root), - CoreBlockId::Justified => Ok(chain - .canonical_head - .cached_head() - .justified_checkpoint() - .root), - CoreBlockId::Slot(slot) => chain - .block_root_at_slot(*slot, WhenSlotSkipped::None) - .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|root_opt| { - root_opt.ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "beacon block at slot {}", - slot - )) - }) - }), - CoreBlockId::Root(root) => Ok(*root), + CoreBlockId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok(( + cached_head.head_block_root(), + execution_status.is_optimistic_or_invalid(), + )) + } + CoreBlockId::Genesis => Ok((chain.genesis_block_root, false)), + CoreBlockId::Finalized => { + let finalized_checkpoint = + chain.canonical_head.cached_head().finalized_checkpoint(); + let (_slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; + Ok((finalized_checkpoint.root, execution_optimistic)) + } + CoreBlockId::Justified => { + let justified_checkpoint = + chain.canonical_head.cached_head().justified_checkpoint(); + let (_slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; + Ok((justified_checkpoint.root, execution_optimistic)) + } + CoreBlockId::Slot(slot) => { + let execution_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + let root = chain + .block_root_at_slot(*slot, WhenSlotSkipped::None) 
+ .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block at slot {}", + slot + )) + }) + })?; + Ok((root, execution_optimistic)) + } + CoreBlockId::Root(root) => { + // This matches the behaviour of other consensus clients (e.g. Teku). + if root == &Hash256::zero() { + return Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))); + }; + if chain + .store + .block_exists(root) + .map_err(BeaconChainError::DBError) + .map_err(warp_utils::reject::beacon_chain_error)? + { + let execution_optimistic = chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok((*root, execution_optimistic)) + } else { + return Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))); + } + } } } @@ -55,11 +103,20 @@ impl BlockId { pub fn blinded_block( &self, chain: &BeaconChain, - ) -> Result>, warp::Rejection> { + ) -> Result<(SignedBlindedBeaconBlock, ExecutionOptimistic), warp::Rejection> { match &self.0 { - CoreBlockId::Head => Ok(chain.head_beacon_block().clone_as_blinded()), + CoreBlockId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok(( + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + )) + } CoreBlockId::Slot(slot) => { - let root = self.root(chain)?; + let (root, execution_optimistic) = self.root(chain)?; chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -71,7 +128,7 @@ impl BlockId { slot ))); } - Ok(block) + Ok((block, execution_optimistic)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -80,8 +137,8 @@ impl 
BlockId { }) } _ => { - let root = self.root(chain)?; - chain + let (root, execution_optimistic) = self.root(chain)?; + let block = chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) .and_then(|root_opt| { @@ -91,7 +148,8 @@ impl BlockId { root )) }) - }) + })?; + Ok((block, execution_optimistic)) } } } @@ -100,11 +158,20 @@ impl BlockId { pub async fn full_block( &self, chain: &BeaconChain, - ) -> Result>, warp::Rejection> { + ) -> Result<(Arc>, ExecutionOptimistic), warp::Rejection> { match &self.0 { - CoreBlockId::Head => Ok(chain.head_beacon_block()), + CoreBlockId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok(( + cached_head.snapshot.beacon_block.clone(), + execution_status.is_optimistic_or_invalid(), + )) + } CoreBlockId::Slot(slot) => { - let root = self.root(chain)?; + let (root, execution_optimistic) = self.root(chain)?; chain .get_block(&root) .await @@ -117,7 +184,7 @@ impl BlockId { slot ))); } - Ok(Arc::new(block)) + Ok((Arc::new(block), execution_optimistic)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -126,18 +193,20 @@ impl BlockId { }) } _ => { - let root = self.root(chain)?; + let (root, execution_optimistic) = self.root(chain)?; chain .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) .and_then(|block_opt| { - block_opt.map(Arc::new).ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "beacon block with root {}", - root - )) - }) + block_opt + .map(|block| (Arc::new(block), execution_optimistic)) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + }) }) } } @@ -151,3 +220,9 @@ impl FromStr for BlockId { CoreBlockId::from_str(s).map(Self) } } + +impl fmt::Display for BlockId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", 
self.0) + } +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index a15b52582c..78ebe3302f 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,19 +13,18 @@ mod block_rewards; mod database; mod metrics; mod proposer_duties; +mod publish_blocks; mod state_id; mod sync_committees; mod validator_inclusion; mod version; use beacon_chain::{ - attestation_verification::VerifiedAttestation, - observed_operations::ObservationOutcome, - validator_monitor::{get_block_delay_ms, timestamp_now}, - AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - ProduceBlockVerification, WhenSlotSkipped, + attestation_verification::VerifiedAttestation, observed_operations::ObservationOutcome, + validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, + BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; -use block_id::BlockId; +pub use block_id::BlockId; use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -34,7 +33,7 @@ use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -use state_id::StateId; +pub use state_id::StateId; use std::borrow::Cow; use std::convert::TryInto; use std::future::Future; @@ -45,16 +44,16 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttestationData, AttesterSlashing, BeaconBlockBodyMerge, BeaconBlockMerge, - BeaconStateError, BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, - FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, - SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, - 
SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncCommitteeMessage, SyncContributionData, + Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, + CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, + ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, + SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, + SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, + SyncContributionData, }; use version::{ - add_consensus_version_header, fork_versioned_response, inconsistent_fork_rejection, - unsupported_version_rejection, V1, + add_consensus_version_header, execution_optimistic_fork_versioned_response, + fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2, }; use warp::http::StatusCode; use warp::sse::Event; @@ -77,6 +76,9 @@ const SYNC_TOLERANCE_EPOCHS: u64 = 8; /// A custom type which allows for both unsecured and TLS-enabled HTTP servers. type HttpServer = (SocketAddr, Pin + Send>>); +/// Alias for readability. +pub type ExecutionOptimistic = bool; + /// Configuration used when serving the HTTP server over TLS. #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] pub struct TlsConfig { @@ -304,7 +306,7 @@ pub fn serve( .untuple_one() }; - let eth1_v1 = single_version(V1); + let eth_v1 = single_version(V1); // Create a `warp` filter that provides access to the network globals. 
let inner_network_globals = ctx.network_globals.clone(); @@ -413,7 +415,7 @@ pub fn serve( */ // GET beacon/genesis - let get_beacon_genesis = eth1_v1 + let get_beacon_genesis = eth_v1 .and(warp::path("beacon")) .and(warp::path("genesis")) .and(warp::path::end()) @@ -433,7 +435,7 @@ pub fn serve( * beacon/states/{state_id} */ - let beacon_states_path = eth1_v1 + let beacon_states_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("states")) .and(warp::path::param::().or_else(|_| async { @@ -450,10 +452,12 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - state_id - .root(&chain) + let (root, execution_optimistic) = state_id.root(&chain)?; + + Ok(root) .map(api_types::RootData::from) .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) }) }); @@ -463,7 +467,14 @@ pub fn serve( .and(warp::path("fork")) .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { - blocking_json_task(move || state_id.fork(&chain).map(api_types::GenericResponse::from)) + blocking_json_task(move || { + let (fork, execution_optimistic) = + state_id.fork_and_execution_optimistic(&chain)?; + Ok(api_types::ExecutionOptimisticResponse { + data: fork, + execution_optimistic: Some(execution_optimistic), + }) + }) }); // GET beacon/states/{state_id}/finality_checkpoints @@ -473,15 +484,24 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - state_id - .map_state(&chain, |state| { - Ok(api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }) - }) - .map(api_types::GenericResponse::from) + let (data, execution_optimistic) = state_id.map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + Ok(( + api_types::FinalityCheckpointsData { 
+ previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }); @@ -497,35 +517,45 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - state_id - .map_state(&chain, |state| { - Ok(state - .validators() - .iter() - .zip(state.balances().iter()) - .enumerate() - // filter by validator id(s) if provided - .filter(|(index, (validator, _))| { - query.id.as_ref().map_or(true, |ids| { - ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => { - &validator.pubkey == pubkey - } - ValidatorId::Index(param_index) => { - *param_index == *index as u64 - } + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + query.id.as_ref().map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => { + &validator.pubkey == pubkey + } + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) }) - }) - }) - .map(|(index, (_, balance))| { - Some(api_types::ValidatorBalanceData { - index: index as u64, - balance: *balance, - }) - }) - .collect::>()) - }) - .map(api_types::GenericResponse::from) + .map(|(index, (_, balance))| { + Some(api_types::ValidatorBalanceData { + index: index as u64, + balance: *balance, + }) + }) + .collect::>(), + execution_optimistic, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }, ); @@ -542,57 +572,67 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let 
query = query_res?; - state_id - .map_state(&chain, |state| { - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; - Ok(state - .validators() - .iter() - .zip(state.balances().iter()) - .enumerate() - // filter by validator id(s) if provided - .filter(|(index, (validator, _))| { - query.id.as_ref().map_or(true, |ids| { - ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => { - &validator.pubkey == pubkey - } - ValidatorId::Index(param_index) => { - *param_index == *index as u64 + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + query.id.as_ref().map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => { + &validator.pubkey == pubkey + } + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) + }) + // filter by status(es) if provided and map the result + .filter_map(|(index, (validator, balance))| { + let status = api_types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ); + + let status_matches = + query.status.as_ref().map_or(true, |statuses| { + statuses.contains(&status) + || statuses.contains(&status.superstatus()) + }); + + if status_matches { + Some(api_types::ValidatorData { + index: index as u64, + balance: *balance, + status, + validator: validator.clone(), + }) + } else { + None } }) - }) - }) - // filter by status(es) if provided and map the result - .filter_map(|(index, (validator, balance))| { - let status = api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ); + .collect::>(), + execution_optimistic, + )) + }, + )?; - let status_matches = 
- query.status.as_ref().map_or(true, |statuses| { - statuses.contains(&status) - || statuses.contains(&status.superstatus()) - }); - - if status_matches { - Some(api_types::ValidatorData { - index: index as u64, - balance: *balance, - status, - validator: validator.clone(), - }) - } else { - None - } - }) - .collect::>()) - }) - .map(api_types::GenericResponse::from) + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }, ); @@ -610,41 +650,51 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { blocking_json_task(move || { - state_id - .map_state(&chain, |state| { - let index_opt = match &validator_id { - ValidatorId::PublicKey(pubkey) => { - state.validators().iter().position(|v| v.pubkey == *pubkey) - } - ValidatorId::Index(index) => Some(*index as usize), - }; + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => { + state.validators().iter().position(|v| v.pubkey == *pubkey) + } + ValidatorId::Index(index) => Some(*index as usize), + }; - index_opt - .and_then(|index| { - let validator = state.validators().get(index)?; - let balance = *state.balances().get(index)?; - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; + Ok(( + index_opt + .and_then(|index| { + let validator = state.validators().get(index)?; + let balance = *state.balances().get(index)?; + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; - Some(api_types::ValidatorData { - index: index as u64, - balance, - status: api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ), - validator: validator.clone(), - }) - }) - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "unknown validator: {}", - validator_id - )) - }) - }) - 
.map(api_types::GenericResponse::from) + Some(api_types::ValidatorData { + index: index as u64, + balance, + status: api_types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "unknown validator: {}", + validator_id + )) + })?, + execution_optimistic, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }, ); @@ -658,86 +708,98 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, query: api_types::CommitteesQuery| { blocking_json_task(move || { - state_id.map_state(&chain, |state| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); - let committee_cache = match RelativeEpoch::from_epoch(current_epoch, epoch) - { - Ok(relative_epoch) - if state.committee_cache_is_initialized(relative_epoch) => - { - state.committee_cache(relative_epoch).map(Cow::Borrowed) - } - _ => CommitteeCache::initialized(state, epoch, &chain.spec) - .map(Cow::Owned), - } - .map_err(|e| match e { - BeaconStateError::EpochOutOfBounds => { - let max_sprp = T::EthSpec::slots_per_historical_root() as u64; - let first_subsequent_restore_point_slot = - ((epoch.start_slot(T::EthSpec::slots_per_epoch()) / max_sprp) - + 1) - * max_sprp; - if epoch < current_epoch { - warp_utils::reject::custom_bad_request(format!( - "epoch out of bounds, try state at slot {}", - first_subsequent_restore_point_slot, - )) - } else { - warp_utils::reject::custom_bad_request( - "epoch out of bounds, too far in future".into(), - ) + let committee_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) 
+ if state + .committee_cache_is_initialized(relative_epoch) => + { + state.committee_cache(relative_epoch).map(Cow::Borrowed) + } + _ => CommitteeCache::initialized(state, epoch, &chain.spec) + .map(Cow::Owned), + } + .map_err(|e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() as u64; + let first_subsequent_restore_point_slot = ((epoch + .start_slot(T::EthSpec::slots_per_epoch()) + / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request(format!( + "epoch out of bounds, try state at slot {}", + first_subsequent_restore_point_slot, + )) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, too far in future".into(), + ) + } + } + _ => warp_utils::reject::beacon_chain_error(e.into()), + })?; + + // Use either the supplied slot or all slots in the epoch. + let slots = + query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); + + // Use either the supplied committee index or all available indices. + let indices = + query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); + + let mut response = Vec::with_capacity(slots.len() * indices.len()); + + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. 
+ if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request( + format!("{} is not in epoch {}", slot, epoch), + )); + } + + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; + + response.push(api_types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } } - } - _ => warp_utils::reject::beacon_chain_error(e.into()), - })?; - // Use either the supplied slot or all slots in the epoch. - let slots = query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { - epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() - }); - - // Use either the supplied committee index or all available indices. - let indices = query.index.map(|index| vec![index]).unwrap_or_else(|| { - (0..committee_cache.committees_per_slot()).collect() - }); - - let mut response = Vec::with_capacity(slots.len() * indices.len()); - - for slot in slots { - // It is not acceptable to query with a slot that is not within the - // specified epoch. 
- if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { - return Err(warp_utils::reject::custom_bad_request(format!( - "{} is not in epoch {}", - slot, epoch - ))); - } - - for &index in &indices { - let committee = committee_cache - .get_beacon_committee(slot, index) - .ok_or_else(|| { - warp_utils::reject::custom_bad_request(format!( - "committee index {} does not exist in epoch {}", - index, epoch - )) - })?; - - response.push(api_types::CommitteeData { - index, - slot, - validators: committee - .committee - .iter() - .map(|i| *i as u64) - .collect(), - }); - } - } - - Ok(api_types::GenericResponse::from(response)) + Ok((response, execution_optimistic)) + }, + )?; + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), }) }) }, @@ -754,28 +816,35 @@ pub fn serve( chain: Arc>, query: api_types::SyncCommitteesQuery| { blocking_json_task(move || { - let sync_committee = state_id.map_state(&chain, |state| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - state - .get_built_sync_committee(epoch, &chain.spec) - .map(|committee| committee.clone()) - .map_err(|e| match e { - BeaconStateError::SyncCommitteeNotKnown { .. } => { - warp_utils::reject::custom_bad_request(format!( + let (sync_committee, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + Ok(( + state + .get_built_sync_committee(epoch, &chain.spec) + .map(|committee| committee.clone()) + .map_err(|e| match e { + BeaconStateError::SyncCommitteeNotKnown { .. 
} => { + warp_utils::reject::custom_bad_request(format!( "state at epoch {} has no sync committee for epoch {}", current_epoch, epoch )) - } - BeaconStateError::IncorrectStateVariant => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} is not activated for Altair", - current_epoch, - )) - } - e => warp_utils::reject::beacon_state_error(e), - }) - })?; + } + BeaconStateError::IncorrectStateVariant => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} is not activated for Altair", + current_epoch, + )) + } + e => warp_utils::reject::beacon_state_error(e), + })?, + execution_optimistic, + )) + }, + )?; let validators = chain .validator_indices(sync_committee.pubkeys.iter()) @@ -793,7 +862,8 @@ pub fn serve( validator_aggregates, }; - Ok(api_types::GenericResponse::from(response)) + Ok(api_types::GenericResponse::from(response) + .add_execution_optimistic(execution_optimistic)) }) }, ); @@ -805,7 +875,7 @@ pub fn serve( // things. Returning non-canonical things is hard for us since we don't already have a // mechanism for arbitrary forwards block iteration, we only support iterating forwards along // the canonical chain. - let get_beacon_headers = eth1_v1 + let get_beacon_headers = eth_v1 .and(warp::path("beacon")) .and(warp::path("headers")) .and(warp::query::()) @@ -814,15 +884,24 @@ pub fn serve( .and_then( |query: api_types::HeadersQuery, chain: Arc>| { blocking_json_task(move || { - let (root, block) = match (query.slot, query.parent_root) { + let (root, block, execution_optimistic) = match (query.slot, query.parent_root) + { // No query parameters, return the canonical head block. 
(None, None) => { - let block = chain.head_beacon_block(); - (block.canonical_root(), block.clone_as_blinded()) + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + ( + cached_head.head_block_root(), + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + ) } // Only the parent root parameter, do a forwards-iterator lookup. (None, Some(parent_root)) => { - let parent = BlockId::from_root(parent_root).blinded_block(&chain)?; + let (parent, execution_optimistic) = + BlockId::from_root(parent_root).blinded_block(&chain)?; let (root, _slot) = chain .forwards_iter_block_roots(parent.slot()) .map_err(warp_utils::reject::beacon_chain_error)? @@ -841,13 +920,21 @@ pub fn serve( BlockId::from_root(root) .blinded_block(&chain) - .map(|block| (root, block))? + // Ignore this `execution_optimistic` since the first value has + // more information about the original request. + .map(|(block, _execution_optimistic)| { + (root, block, execution_optimistic) + })? } // Slot is supplied, search by slot and optionally filter by // parent root. (Some(slot), parent_root_opt) => { - let root = BlockId::from_slot(slot).root(&chain)?; - let block = BlockId::from_root(root).blinded_block(&chain)?; + let (root, execution_optimistic) = + BlockId::from_slot(slot).root(&chain)?; + // Ignore the second `execution_optimistic`, the first one is the + // most relevant since it knows that we queried by slot. + let (block, _execution_optimistic) = + BlockId::from_root(root).blinded_block(&chain)?; // If the parent root was supplied, check that it matches the block // obtained via a slot lookup. 
@@ -860,7 +947,7 @@ pub fn serve( } } - (root, block) + (root, block, execution_optimistic) } }; @@ -873,13 +960,14 @@ pub fn serve( }, }; - Ok(api_types::GenericResponse::from(vec![data])) + Ok(api_types::GenericResponse::from(vec![data]) + .add_execution_optimistic(execution_optimistic)) }) }, ); // GET beacon/headers/{block_id} - let get_beacon_headers_block_id = eth1_v1 + let get_beacon_headers_block_id = eth_v1 .and(warp::path("beacon")) .and(warp::path("headers")) .and(warp::path::param::().or_else(|_| async { @@ -891,8 +979,11 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let root = block_id.root(&chain)?; - let block = BlockId::from_root(root).blinded_block(&chain)?; + let (root, execution_optimistic) = block_id.root(&chain)?; + // Ignore the second `execution_optimistic` since the first one has more + // information about the original request. + let (block, _execution_optimistic) = + BlockId::from_root(root).blinded_block(&chain)?; let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) @@ -908,7 +999,10 @@ pub fn serve( }, }; - Ok(api_types::GenericResponse::from(data)) + Ok(api_types::ExecutionOptimisticResponse { + execution_optimistic: Some(execution_optimistic), + data, + }) }) }); @@ -917,7 +1011,7 @@ pub fn serve( */ // POST beacon/blocks - let post_beacon_blocks = eth1_v1 + let post_beacon_blocks = eth_v1 .and(warp::path("beacon")) .and(warp::path("blocks")) .and(warp::path::end()) @@ -930,81 +1024,9 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - let seen_timestamp = timestamp_now(); - - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message(&network_tx, PubsubMessage::BeaconBlock(block.clone()))?; - - // Determine the delay after the start of the slot, register it with metrics. 
- let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); - - match chain.process_block(block.clone()).await { - Ok(root) => { - info!( - log, - "Valid block from HTTP API"; - "block_delay" => ?delay, - "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), - "slot" => block.slot(), - ); - - // Notify the validator monitor. - chain.validator_monitor.read().register_api_block( - seen_timestamp, - block.message(), - root, - &chain.slot_clock, - ); - - // Update the head since it's likely this block will become the new - // head. - chain - .recompute_head_at_current_slot() - .await - .map_err(warp_utils::reject::beacon_chain_error)?; - - // Perform some logging to inform users if their blocks are being produced - // late. - // - // Check to see the thresholds are non-zero to avoid logging errors with small - // slot times (e.g., during testing) - let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let error_threshold = crit_threshold / 2; - if delay >= crit_threshold { - crit!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } else if delay >= error_threshold { - error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } - - Ok(warp::reply::json(&())) - } - Err(e) => { - let msg = format!("{:?}", e); - error!( - log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) - } - } + publish_blocks::publish_block(block, chain, &network_tx, log) + .await + .map(|()| warp::reply()) }, ); @@ -1013,7 +1035,7 @@ pub fn serve( */ // POST beacon/blocks - let 
post_beacon_blinded_blocks = eth1_v1 + let post_beacon_blinded_blocks = eth_v1 .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(warp::path::end()) @@ -1022,90 +1044,13 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |block: Arc>>, + |block: SignedBeaconBlock>, chain: Arc>, network_tx: UnboundedSender>, - _log: Logger| async move { - if let Some(el) = chain.execution_layer.as_ref() { - //FIXME(sean): we may not always receive the payload in this response because it - // should be the relay's job to propogate the block. However, since this block is - // already signed and sent this might be ok (so long as the relay validates - // the block before revealing the payload). - - //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should - // be able to support the normal block proposal flow, because at some point full block endpoints - // will be deprecated from the beacon API. This will entail creating full blocks in - // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded - // blocks. We will access the payload of those blocks here. This flow should happen if the - // execution layer has no payload builders or if we have not yet finalized post-merge transition. 
- let payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { - warp_utils::reject::custom_server_error(format!("proposal failed: {:?}", e)) - })?; - let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { - message: BeaconBlockMerge { - slot: block.message().slot(), - proposer_index: block.message().proposer_index(), - parent_root: block.message().parent_root(), - state_root: block.message().state_root(), - body: BeaconBlockBodyMerge { - randao_reveal: block.message().body().randao_reveal().clone(), - eth1_data: block.message().body().eth1_data().clone(), - graffiti: *block.message().body().graffiti(), - proposer_slashings: block - .message() - .body() - .proposer_slashings() - .clone(), - attester_slashings: block - .message() - .body() - .attester_slashings() - .clone(), - attestations: block.message().body().attestations().clone(), - deposits: block.message().body().deposits().clone(), - voluntary_exits: block.message().body().voluntary_exits().clone(), - sync_aggregate: block - .message() - .body() - .sync_aggregate() - .unwrap() - .clone(), - execution_payload: payload.into(), - }, - }, - signature: block.signature().clone(), - }); - let new_block = Arc::new(new_block); - - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(new_block.clone()), - )?; - - match chain.process_block(new_block).await { - Ok(_) => { - // Update the head since it's likely this block will become the new - // head. 
- chain - .recompute_head_at_current_slot() - .await - .map_err(warp_utils::reject::beacon_chain_error)?; - - Ok(warp::reply::json(&())) - } - Err(e) => { - let msg = format!("{:?}", e); - - Err(warp_utils::reject::broadcast_without_import(msg)) - } - } - } else { - Err(warp_utils::reject::custom_server_error( - "no execution layer found".to_string(), - )) - } + log: Logger| async move { + publish_blocks::publish_blinded_block(block, chain, &network_tx, log) + .await + .map(|()| warp::reply()) }, ); @@ -1115,7 +1060,7 @@ pub fn serve( )) }); - let beacon_blocks_path_v1 = eth1_v1 + let beacon_blocks_path_v1 = eth_v1 .and(warp::path("beacon")) .and(warp::path("blocks")) .and(block_id_or_err) @@ -1138,10 +1083,11 @@ pub fn serve( chain: Arc>, accept_header: Option| { async move { - let block = block_id.full_block(&chain).await?; + let (block, execution_optimistic) = block_id.full_block(&chain).await?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; + match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) @@ -1153,8 +1099,13 @@ pub fn serve( e )) }), - _ => fork_versioned_response(endpoint_version, fork_name, block) - .map(|res| warp::reply::json(&res).into_response()), + _ => execution_optimistic_fork_versioned_response( + endpoint_version, + fork_name, + execution_optimistic, + block, + ) + .map(|res| warp::reply::json(&res).into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) } @@ -1168,10 +1119,12 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - block_id - .root(&chain) - .map(api_types::RootData::from) - .map(api_types::GenericResponse::from) + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + + Ok(api_types::GenericResponse::from(api_types::RootData::from( + block.canonical_root(), + )) + .add_execution_optimistic(execution_optimistic)) }) }); @@ -1182,10 +1135,12 @@ pub fn serve( 
.and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - block_id - .blinded_block(&chain) - .map(|block| block.message().body().attestations().clone()) - .map(api_types::GenericResponse::from) + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + + Ok( + api_types::GenericResponse::from(block.message().body().attestations().clone()) + .add_execution_optimistic(execution_optimistic), + ) }) }); @@ -1193,7 +1148,7 @@ pub fn serve( * beacon/pool */ - let beacon_pool_path = eth1_v1 + let beacon_pool_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("pool")) .and(chain_filter.clone()); @@ -1517,7 +1472,7 @@ pub fn serve( * config */ - let config_path = eth1_v1.and(warp::path("config")); + let config_path = eth_v1.and(warp::path("config")); // GET config/fork_schedule let get_config_fork_schedule = config_path @@ -1591,7 +1546,10 @@ pub fn serve( chain: Arc>| { blocking_task(move || match accept_header { Some(api_types::Accept::Ssz) => { - let state = state_id.state(&chain)?; + // We can ignore the optimistic status for the "fork" since it's a + // specification constant that doesn't change across competing heads of the + // beacon chain. 
+ let (state, _execution_optimistic) = state_id.state(&chain)?; let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1607,44 +1565,71 @@ pub fn serve( )) }) } - _ => state_id.map_state(&chain, |state| { - let fork_name = state - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - let res = fork_versioned_response(endpoint_version, fork_name, &state)?; - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }), + _ => state_id.map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let fork_name = state + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let res = execution_optimistic_fork_versioned_response( + endpoint_version, + fork_name, + execution_optimistic, + &state, + )?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }, + ), }) }, ); // GET debug/beacon/heads - let get_debug_beacon_heads = eth1_v1 + let get_debug_beacon_heads = any_version .and(warp::path("debug")) .and(warp::path("beacon")) .and(warp::path("heads")) .and(warp::path::end()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let heads = chain - .heads() - .into_iter() - .map(|(root, slot)| api_types::ChainHeadData { slot, root }) - .collect::>(); - Ok(api_types::GenericResponse::from(heads)) - }) - }); + .and_then( + |endpoint_version: EndpointVersion, chain: Arc>| { + blocking_json_task(move || { + let heads = chain + .heads() + .into_iter() + .map(|(root, slot)| { + let execution_optimistic = if endpoint_version == V1 { + None + } else if endpoint_version == V2 { + chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&root) + .ok() + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + Ok(api_types::ChainHeadData { + slot, + root, + execution_optimistic, + }) + }) + .collect::, warp::Rejection>>(); + 
Ok(api_types::GenericResponse::from(heads?)) + }) + }, + ); /* * node */ // GET node/identity - let get_node_identity = eth1_v1 + let get_node_identity = eth_v1 .and(warp::path("node")) .and(warp::path("identity")) .and(warp::path::end()) @@ -1682,7 +1667,7 @@ pub fn serve( }); // GET node/version - let get_node_version = eth1_v1 + let get_node_version = eth_v1 .and(warp::path("node")) .and(warp::path("version")) .and(warp::path::end()) @@ -1695,7 +1680,7 @@ pub fn serve( }); // GET node/syncing - let get_node_syncing = eth1_v1 + let get_node_syncing = eth_v1 .and(warp::path("node")) .and(warp::path("syncing")) .and(warp::path::end()) @@ -1712,8 +1697,13 @@ pub fn serve( // Taking advantage of saturating subtraction on slot. let sync_distance = current_slot - head_slot; + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + let syncing_data = api_types::SyncingData { is_syncing: network_globals.sync_state.read().is_syncing(), + is_optimistic, head_slot, sync_distance, }; @@ -1724,7 +1714,7 @@ pub fn serve( ); // GET node/health - let get_node_health = eth1_v1 + let get_node_health = eth_v1 .and(warp::path("node")) .and(warp::path("health")) .and(warp::path::end()) @@ -1749,7 +1739,7 @@ pub fn serve( }); // GET node/peers/{peer_id} - let get_node_peers_by_id = eth1_v1 + let get_node_peers_by_id = eth_v1 .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::param::()) @@ -1806,7 +1796,7 @@ pub fn serve( ); // GET node/peers - let get_node_peers = eth1_v1 + let get_node_peers = eth_v1 .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::end()) @@ -1875,7 +1865,7 @@ pub fn serve( ); // GET node/peer_count - let get_node_peer_count = eth1_v1 + let get_node_peer_count = eth_v1 .and(warp::path("node")) .and(warp::path("peer_count")) .and(warp::path::end()) @@ -1916,7 +1906,7 @@ pub fn serve( */ // GET validator/duties/proposer/{epoch} - let get_validator_duties_proposer = eth1_v1 + 
let get_validator_duties_proposer = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("proposer")) @@ -2059,7 +2049,7 @@ pub fn serve( ); // GET validator/attestation_data?slot,committee_index - let get_validator_attestation_data = eth1_v1 + let get_validator_attestation_data = eth_v1 .and(warp::path("validator")) .and(warp::path("attestation_data")) .and(warp::path::end()) @@ -2091,7 +2081,7 @@ pub fn serve( ); // GET validator/aggregate_attestation?attestation_data_root,slot - let get_validator_aggregate_attestation = eth1_v1 + let get_validator_aggregate_attestation = eth_v1 .and(warp::path("validator")) .and(warp::path("aggregate_attestation")) .and(warp::path::end()) @@ -2123,7 +2113,7 @@ pub fn serve( ); // POST validator/duties/attester/{epoch} - let post_validator_duties_attester = eth1_v1 + let post_validator_duties_attester = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("attester")) @@ -2145,7 +2135,7 @@ pub fn serve( ); // POST validator/duties/sync - let post_validator_duties_sync = eth1_v1 + let post_validator_duties_sync = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("sync")) @@ -2167,7 +2157,7 @@ pub fn serve( ); // GET validator/sync_committee_contribution - let get_validator_sync_committee_contribution = eth1_v1 + let get_validator_sync_committee_contribution = eth_v1 .and(warp::path("validator")) .and(warp::path("sync_committee_contribution")) .and(warp::path::end()) @@ -2179,6 +2169,12 @@ pub fn serve( blocking_json_task(move || { chain .get_aggregated_sync_committee_contribution(&sync_committee_data) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch sync contribution: {:?}", + e + )) + })? 
.map(api_types::GenericResponse::from) .ok_or_else(|| { warp_utils::reject::custom_not_found( @@ -2190,7 +2186,7 @@ pub fn serve( ); // POST validator/aggregate_and_proofs - let post_validator_aggregate_and_proofs = eth1_v1 + let post_validator_aggregate_and_proofs = eth_v1 .and(warp::path("validator")) .and(warp::path("aggregate_and_proofs")) .and(warp::path::end()) @@ -2291,7 +2287,7 @@ pub fn serve( }, ); - let post_validator_contribution_and_proofs = eth1_v1 + let post_validator_contribution_and_proofs = eth_v1 .and(warp::path("validator")) .and(warp::path("contribution_and_proofs")) .and(warp::path::end()) @@ -2318,7 +2314,7 @@ pub fn serve( ); // POST validator/beacon_committee_subscriptions - let post_validator_beacon_committee_subscriptions = eth1_v1 + let post_validator_beacon_committee_subscriptions = eth_v1 .and(warp::path("validator")) .and(warp::path("beacon_committee_subscriptions")) .and(warp::path::end()) @@ -2358,7 +2354,7 @@ pub fn serve( ); // POST validator/prepare_beacon_proposer - let post_validator_prepare_beacon_proposer = eth1_v1 + let post_validator_prepare_beacon_proposer = eth_v1 .and(warp::path("validator")) .and(warp::path("prepare_beacon_proposer")) .and(warp::path::end()) @@ -2406,7 +2402,7 @@ pub fn serve( ); // POST validator/register_validator - let post_validator_register_validator = eth1_v1 + let post_validator_register_validator = eth_v1 .and(warp::path("validator")) .and(warp::path("register_validator")) .and(warp::path::end()) @@ -2449,19 +2445,13 @@ pub fn serve( }) .collect::>(); - debug!( - log, - "Resolved validator request pubkeys"; - "count" => preparation_data.len() - ); - // Update the prepare beacon proposer cache based on this request. 
execution_layer .update_proposer_preparation(current_epoch, &preparation_data) .await; // Call prepare beacon proposer blocking with the latest update in order to make - // sure we have a local payload to fall back to in the event of the blined block + // sure we have a local payload to fall back to in the event of the blinded block // flow failing. chain .prepare_beacon_proposer(current_slot) @@ -2473,13 +2463,41 @@ pub fn serve( )) })?; - //TODO(sean): In the MEV-boost PR, add a call here to send the update request to the builder + let builder = execution_layer + .builder() + .as_ref() + .ok_or(BeaconChainError::BuilderMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; - Ok::<_, warp::Rejection>(warp::reply::json(&())) + info!( + log, + "Forwarding register validator request to connected builder"; + "count" => register_val_data.len(), + ); + + builder + .post_builder_validators(®ister_val_data) + .await + .map(|resp| warp::reply::json(&resp)) + .map_err(|e| { + error!(log, "Error from connected relay"; "error" => ?e); + // Forward the HTTP status code if we are able to, otherwise fall back + // to a server error. + if let eth2::Error::ServerMessage(message) = e { + if message.code == StatusCode::BAD_REQUEST.as_u16() { + return warp_utils::reject::custom_bad_request(message.message); + } else { + // According to the spec this response should only be a 400 or 500, + // so we fall back to a 500 here. 
+ return warp_utils::reject::custom_server_error(message.message); + } + } + warp_utils::reject::custom_server_error(format!("{e:?}")) + }) }, ); // POST validator/sync_committee_subscriptions - let post_validator_sync_committee_subscriptions = eth1_v1 + let post_validator_sync_committee_subscriptions = eth_v1 .and(warp::path("validator")) .and(warp::path("sync_committee_subscriptions")) .and(warp::path::end()) @@ -2759,7 +2777,8 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|state_id: StateId, chain: Arc>| { blocking_task(move || { - let state = state_id.state(&chain)?; + // This debug endpoint provides no indication of optimistic status. + let (state, _execution_optimistic) = state_id.state(&chain)?; Response::builder() .status(200) .header("Content-Type", "application/ssz") @@ -2886,7 +2905,19 @@ pub fn serve( }) }); - let get_events = eth1_v1 + // GET lighthouse/merge_readiness + let get_lighthouse_merge_readiness = warp::path("lighthouse") + .and(warp::path("merge_readiness")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| async move { + let merge_readiness = chain.check_merge_readiness().await; + Ok::<_, warp::reject::Rejection>(warp::reply::json(&api_types::GenericResponse::from( + merge_readiness, + ))) + }); + + let get_events = eth_v1 .and(warp::path("events")) .and(warp::path::end()) .and(multi_key_query::()) @@ -3014,6 +3045,7 @@ pub fn serve( .or(get_lighthouse_block_rewards.boxed()) .or(get_lighthouse_attestation_performance.boxed()) .or(get_lighthouse_block_packing_efficiency.boxed()) + .or(get_lighthouse_merge_readiness.boxed()) .or(get_events.boxed()), ) .or(warp::post().and( diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index bddae55549..877d64e20f 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -55,10 +55,16 @@ pub fn proposer_duties( .safe_add(1) 
.map_err(warp_utils::reject::arith_error)? { - let (proposers, dependent_root, _execution_status, _fork) = + let (proposers, dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(chain, request_epoch, dependent_root, proposers) + convert_to_api_response( + chain, + request_epoch, + dependent_root, + execution_status.is_optimistic_or_invalid(), + proposers, + ) } else if request_epoch > current_epoch .safe_add(1) @@ -88,17 +94,18 @@ fn try_proposer_duties_from_cache( request_epoch: Epoch, chain: &BeaconChain, ) -> Result, warp::reject::Rejection> { - let (head_slot, head_block_root, head_decision_root) = { - let head = chain.canonical_head.cached_head(); - let head_block_root = head.head_block_root(); - let decision_root = head - .snapshot - .beacon_state - .proposer_shuffling_decision_root(head_block_root) - .map_err(warp_utils::reject::beacon_state_error)?; - (head.head_slot(), head_block_root, decision_root) - }; - let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); + let head = chain.canonical_head.cached_head(); + let head_block = &head.snapshot.beacon_block; + let head_block_root = head.head_block_root(); + let head_decision_root = head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; + let head_epoch = head_block.slot().epoch(T::EthSpec::slots_per_epoch()); + let execution_optimistic = chain + .is_optimistic_or_invalid_head_block(head_block) + .map_err(warp_utils::reject::beacon_chain_error)?; let dependent_root = match head_epoch.cmp(&request_epoch) { // head_epoch == request_epoch @@ -120,7 +127,13 @@ fn try_proposer_duties_from_cache( .get_epoch::(dependent_root, request_epoch) .cloned() .map(|indices| { - convert_to_api_response(chain, request_epoch, dependent_root, indices.to_vec()) + convert_to_api_response( + chain, + request_epoch, + 
dependent_root, + execution_optimistic, + indices.to_vec(), + ) }) .transpose() } @@ -139,7 +152,7 @@ fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { - let (indices, dependent_root, _execution_status, fork) = + let (indices, dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) .map_err(warp_utils::reject::beacon_chain_error)?; @@ -151,7 +164,13 @@ fn compute_and_cache_proposer_duties( .map_err(BeaconChainError::from) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(chain, current_epoch, dependent_root, indices) + convert_to_api_response( + chain, + current_epoch, + dependent_root, + execution_status.is_optimistic_or_invalid(), + indices, + ) } /// Compute some proposer duties by reading a `BeaconState` from disk, completely ignoring the @@ -162,31 +181,37 @@ fn compute_historic_proposer_duties( ) -> Result { // If the head is quite old then it might still be relevant for a historical request. // - // Use the `with_head` function to read & clone in a single call to avoid race conditions. - let state_opt = chain - .with_head(|head| { - if head.beacon_state.current_epoch() <= epoch { - Ok(Some(( - head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), - ))) - } else { - Ok(None) - } - }) - .map_err(warp_utils::reject::beacon_chain_error)?; - - let state = if let Some((state_root, mut state)) = state_opt { - // If we've loaded the head state it might be from a previous epoch, ensure it's in a - // suitable epoch. - ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec) + // Avoid holding the `cached_head` longer than necessary. + let state_opt = { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() .map_err(warp_utils::reject::beacon_chain_error)?; - state - } else { - StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? 
+ let head = &cached_head.snapshot; + + if head.beacon_state.current_epoch() <= epoch { + Some(( + head.beacon_state_root(), + head.beacon_state + .clone_with(CloneConfig::committee_caches_only()), + execution_status.is_optimistic_or_invalid(), + )) + } else { + None + } }; + let (state, execution_optimistic) = + if let Some((state_root, mut state, execution_optimistic)) = state_opt { + // If we've loaded the head state it might be from a previous epoch, ensure it's in a + // suitable epoch. + ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec) + .map_err(warp_utils::reject::beacon_chain_error)?; + (state, execution_optimistic) + } else { + StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + }; + // Ensure the state lookup was correct. if state.current_epoch() != epoch { return Err(warp_utils::reject::custom_server_error(format!( @@ -208,7 +233,7 @@ fn compute_historic_proposer_duties( .map_err(BeaconChainError::from) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(chain, epoch, dependent_root, indices) + convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices) } /// Converts the internal representation of proposer duties into one that is compatible with the @@ -217,6 +242,7 @@ fn convert_to_api_response( chain: &BeaconChain, epoch: Epoch, dependent_root: Hash256, + execution_optimistic: bool, indices: Vec, ) -> Result { let index_to_pubkey_map = chain @@ -251,6 +277,7 @@ fn convert_to_api_response( } else { Ok(api_types::DutiesResponse { dependent_root, + execution_optimistic: Some(execution_optimistic), data: proposer_data, }) } diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs new file mode 100644 index 0000000000..b282e6f490 --- /dev/null +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -0,0 +1,155 @@ +use crate::metrics; +use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; +use 
beacon_chain::{BeaconChain, BeaconChainTypes, CountUnrealized}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use slog::{crit, error, info, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tree_hash::TreeHash; +use types::{ + BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, + SignedBeaconBlock, +}; +use warp::Rejection; + +/// Handles a request from the HTTP API for full blocks. +pub async fn publish_block( + block: Arc>, + chain: Arc>, + network_tx: &UnboundedSender>, + log: Logger, +) -> Result<(), Rejection> { + let seen_timestamp = timestamp_now(); + + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + + // Determine the delay after the start of the slot, register it with metrics. + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); + + match chain + .process_block(block.clone(), CountUnrealized::True) + .await + { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "block_delay" => ?delay, + "root" => format!("{}", root), + "proposer_index" => block.message().proposer_index(), + "slot" => block.slot(), + ); + + // Notify the validator monitor. + chain.validator_monitor.read().register_api_block( + seen_timestamp, + block.message(), + root, + &chain.slot_clock, + ); + + // Update the head since it's likely this block will become the new + // head. + chain.recompute_head_at_current_slot().await; + + // Perform some logging to inform users if their blocks are being produced + // late. 
+ // + // Check to see the thresholds are non-zero to avoid logging errors with small + // slot times (e.g., during testing) + let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let error_threshold = crit_threshold / 2; + if delay >= crit_threshold { + crit!( + log, + "Block was broadcast too late"; + "msg" => "system may be overloaded, block likely to be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } else if delay >= error_threshold { + error!( + log, + "Block broadcast was delayed"; + "msg" => "system may be overloaded, block may be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } + + Ok(()) + } + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } +} + +/// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full +/// blocks before publishing. +pub async fn publish_blinded_block( + block: SignedBeaconBlock>, + chain: Arc>, + network_tx: &UnboundedSender>, + log: Logger, +) -> Result<(), Rejection> { + let full_block = reconstruct_block(chain.clone(), block, log.clone()).await?; + publish_block::(Arc::new(full_block), chain, network_tx, log).await +} + +/// Deconstruct the given blinded block, and construct a full block. This attempts to use the +/// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve +/// the full payload. 
+async fn reconstruct_block( + chain: Arc>, + block: SignedBeaconBlock>, + log: Logger, +) -> Result>, Rejection> { + let full_payload = if let Ok(payload_header) = block.message().body().execution_payload() { + let el = chain.execution_layer.as_ref().ok_or_else(|| { + warp_utils::reject::custom_server_error("Missing execution layer".to_string()) + })?; + + // If the execution block hash is zero, use an empty payload. + let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { + ExecutionPayload::default() + // If we already have an execution payload with this transactions root cached, use it. + } else if let Some(cached_payload) = + el.get_payload_by_root(&payload_header.tree_hash_root()) + { + info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash); + cached_payload + // Otherwise, this means we are attempting a blind block proposal. + } else { + let full_payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Blind block proposal failed: {:?}", + e + )) + })?; + info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); + full_payload + }; + + Some(full_payload) + } else { + None + }; + + block.try_into_full_block(full_payload).ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) + }) +} diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 8604c91899..051789c953 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -1,14 +1,17 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use crate::ExecutionOptimistic; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::StateId as CoreStateId; +use std::fmt; use std::str::FromStr; -use types::{BeaconState, EthSpec, Fork, Hash256, Slot}; +use 
types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; /// Wraps `eth2::types::StateId` and provides common state-access functionality. E.g., reading /// states or parts of states from the database. -pub struct StateId(CoreStateId); +#[derive(Debug)] +pub struct StateId(pub CoreStateId); impl StateId { - pub fn slot(slot: Slot) -> Self { + pub fn from_slot(slot: Slot) -> Self { Self(CoreStateId::Slot(slot)) } @@ -16,54 +19,128 @@ impl StateId { pub fn root( &self, chain: &BeaconChain, - ) -> Result { - let slot = match &self.0 { - CoreStateId::Head => return Ok(chain.canonical_head.cached_head().head_state_root()), - CoreStateId::Genesis => return Ok(chain.genesis_state_root), - CoreStateId::Finalized => chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - CoreStateId::Justified => chain - .canonical_head - .cached_head() - .justified_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - CoreStateId::Slot(slot) => *slot, - CoreStateId::Root(root) => return Ok(*root), + ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { + let (slot, execution_optimistic) = match &self.0 { + CoreStateId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok(( + cached_head.head_state_root(), + execution_status.is_optimistic_or_invalid(), + )); + } + CoreStateId::Genesis => return Ok((chain.genesis_state_root, false)), + CoreStateId::Finalized => { + let finalized_checkpoint = + chain.canonical_head.cached_head().finalized_checkpoint(); + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)? + } + CoreStateId::Justified => { + let justified_checkpoint = + chain.canonical_head.cached_head().justified_checkpoint(); + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)? 
+ } + CoreStateId::Slot(slot) => ( + *slot, + chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?, + ), + CoreStateId::Root(root) => { + if let Some(hot_summary) = chain + .store + .load_hot_state_summary(root) + .map_err(BeaconChainError::DBError) + .map_err(warp_utils::reject::beacon_chain_error)? + { + let execution_optimistic = chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic)); + } else if let Some(_cold_state_slot) = chain + .store + .load_cold_state_slot(root) + .map_err(BeaconChainError::DBError) + .map_err(warp_utils::reject::beacon_chain_error)? + { + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let finalized_root = fork_choice + .cached_fork_choice_view() + .finalized_checkpoint + .root; + let execution_optimistic = fork_choice + .is_optimistic_or_invalid_block_no_fallback(&finalized_root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic)); + } else { + return Err(warp_utils::reject::custom_not_found(format!( + "beacon state for state root {}", + root + ))); + } + } }; - chain + let root = chain .state_root_at_slot(slot) .map_err(warp_utils::reject::beacon_chain_error)? .ok_or_else(|| { warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) - }) + })?; + + Ok((root, execution_optimistic)) } /// Return the `fork` field of the state identified by `self`. + /// Also returns the `execution_optimistic` value of the state. 
+ pub fn fork_and_execution_optimistic( + &self, + chain: &BeaconChain, + ) -> Result<(Fork, bool), warp::Rejection> { + self.map_state_and_execution_optimistic(chain, |state, execution_optimistic| { + Ok((state.fork(), execution_optimistic)) + }) + } + + /// Convenience function to compute `fork` when `execution_optimistic` isn't desired. pub fn fork( &self, chain: &BeaconChain, ) -> Result { - self.map_state(chain, |state| Ok(state.fork())) + self.fork_and_execution_optimistic(chain) + .map(|(fork, _)| fork) } /// Return the `BeaconState` identified by `self`. pub fn state( &self, chain: &BeaconChain, - ) -> Result, warp::Rejection> { - let (state_root, slot_opt) = match &self.0 { - CoreStateId::Head => return Ok(chain.head_beacon_state_cloned()), + ) -> Result<(BeaconState, ExecutionOptimistic), warp::Rejection> { + let ((state_root, execution_optimistic), slot_opt) = match &self.0 { + CoreStateId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok(( + cached_head + .snapshot + .beacon_state + .clone_with_only_committee_caches(), + execution_status.is_optimistic_or_invalid(), + )); + } CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), _ => (self.root(chain)?, None), }; - chain + let state = chain .get_state(&state_root, slot_opt) .map_err(warp_utils::reject::beacon_chain_error) .and_then(|opt| { @@ -73,13 +150,17 @@ impl StateId { state_root )) }) - }) + })?; + + Ok((state, execution_optimistic)) } + /* /// Map a function across the `BeaconState` identified by `self`. /// /// This function will avoid instantiating/copying a new state when `self` points to the head /// of the chain. 
+ #[allow(dead_code)] pub fn map_state( &self, chain: &BeaconChain, @@ -95,6 +176,36 @@ impl StateId { _ => func(&self.state(chain)?), } } + */ + + /// Functions the same as `map_state` but additionally computes the value of + /// `execution_optimistic` of the state identified by `self`. + /// + /// This is to avoid re-instantiating `state` unnecessarily. + pub fn map_state_and_execution_optimistic( + &self, + chain: &BeaconChain, + func: F, + ) -> Result + where + F: Fn(&BeaconState, bool) -> Result, + { + let (state, execution_optimistic) = match &self.0 { + CoreStateId::Head => { + let (head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + return func( + &head.snapshot.beacon_state, + execution_status.is_optimistic_or_invalid(), + ); + } + _ => self.state(chain)?, + }; + + func(&state, execution_optimistic) + } } impl FromStr for StateId { @@ -104,3 +215,35 @@ impl FromStr for StateId { CoreStateId::from_str(s).map(Self) } } + +impl fmt::Display for StateId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// Returns the first slot of the checkpoint's `epoch` and the execution status of the checkpoint's +/// `root`. +pub fn checkpoint_slot_and_execution_optimistic( + chain: &BeaconChain, + checkpoint: Checkpoint, +) -> Result<(Slot, ExecutionOptimistic), warp::reject::Rejection> { + let slot = checkpoint.epoch.start_slot(T::EthSpec::slots_per_epoch()); + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let finalized_checkpoint = fork_choice.cached_fork_choice_view().finalized_checkpoint; + + // If the checkpoint is pre-finalization, just use the optimistic status of the finalized + // block. 
+ let root = if checkpoint.epoch < finalized_checkpoint.epoch { + &finalized_checkpoint.root + } else { + &checkpoint.root + }; + + let execution_optimistic = fork_choice + .is_optimistic_or_invalid_block_no_fallback(root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + + Ok((slot, execution_optimistic)) +} diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 3ebc3c4ec8..77becef7df 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -22,7 +22,7 @@ use types::{ }; /// The struct that is returned to the requesting HTTP client. -type SyncDuties = api_types::GenericResponse>; +type SyncDuties = api_types::ExecutionOptimisticResponse>; /// Handles a request from the HTTP API for sync committee duties. pub fn sync_committee_duties( @@ -34,14 +34,20 @@ pub fn sync_committee_duties( altair_fork_epoch } else { // Empty response for networks with Altair disabled. - return Ok(convert_to_response(vec![])); + return Ok(convert_to_response(vec![], false)); }; + // Even when computing duties from state, any block roots pulled using the request epoch are + // still dependent on the head. So using `is_optimistic_head` is fine for both cases. + let execution_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + // Try using the head's sync committees to satisfy the request. This should be sufficient for // the vast majority of requests. Rather than checking if we think the request will succeed in a // way prone to data races, we attempt the request immediately and check the error code. 
match chain.sync_committee_duties_from_head(request_epoch, request_indices) { - Ok(duties) => return Ok(convert_to_response(duties)), + Ok(duties) => return Ok(convert_to_response(duties, execution_optimistic)), Err(BeaconChainError::SyncDutiesError(BeaconStateError::SyncCommitteeNotKnown { .. })) @@ -60,7 +66,7 @@ pub fn sync_committee_duties( )), e => warp_utils::reject::beacon_chain_error(e), })?; - Ok(convert_to_response(duties)) + Ok(convert_to_response(duties, execution_optimistic)) } /// Slow path for duties: load a state and use it to compute the duties. @@ -117,8 +123,9 @@ fn duties_from_state_load( } } -fn convert_to_response(duties: Vec>) -> SyncDuties { +fn convert_to_response(duties: Vec>, execution_optimistic: bool) -> SyncDuties { api_types::GenericResponse::from(duties.into_iter().flatten().collect::>()) + .add_execution_optimistic(execution_optimistic) } /// Receive sync committee duties, storing them in the pools & broadcasting them. diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index 48dfc17ffa..917e85e649 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -16,7 +16,10 @@ fn end_of_epoch_state( chain: &BeaconChain, ) -> Result, warp::reject::Rejection> { let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); - StateId::slot(target_slot).state(chain) + // The execution status is not returned, any functions which rely upon this method might return + // optimistic information without explicitly declaring so. + let (state, _execution_status) = StateId::from_slot(target_slot).state(chain)?; + Ok(state) } /// Generate an `EpochProcessingSummary` for `state`. 
diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 854ef0c858..87ba3a4663 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,4 +1,6 @@ -use crate::api_types::{EndpointVersion, ForkVersionedResponse}; +use crate::api_types::{ + EndpointVersion, ExecutionOptimisticForkVersionedResponse, ForkVersionedResponse, +}; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; use types::{ForkName, InconsistentFork}; @@ -25,6 +27,26 @@ pub fn fork_versioned_response( }) } +pub fn execution_optimistic_fork_versioned_response( + endpoint_version: EndpointVersion, + fork_name: ForkName, + execution_optimistic: bool, + data: T, +) -> Result, warp::reject::Rejection> { + let fork_name = if endpoint_version == V1 { + None + } else if endpoint_version == V2 { + Some(fork_name) + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + Ok(ExecutionOptimisticForkVersionedResponse { + version: fork_name, + execution_optimistic: Some(execution_optimistic), + data, + }) +} + /// Add the `Eth-Consensus-Version` header to a response. pub fn add_consensus_version_header(reply: T, fork_name: ForkName) -> WithHeader { reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()) diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 06466c43bb..8f9856991f 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -86,6 +86,16 @@ impl InteractiveTester { pub async fn create_api_server( chain: Arc>, log: Logger, +) -> ApiServer> { + // Get a random unused port. 
+ let port = unused_port::unused_tcp_port().unwrap(); + create_api_server_on_port(chain, log, port).await +} + +pub async fn create_api_server_on_port( + chain: Arc>, + log: Logger, + port: u16, ) -> ApiServer> { let (network_tx, network_rx) = mpsc::unbounded_channel(); @@ -129,7 +139,7 @@ pub async fn create_api_server( config: Config { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - listen_port: 0, + listen_port: port, allow_origin: None, serve_legacy_spec: true, tls_config: None, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index b57a87dfca..38c06848cf 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,4 +1,4 @@ -use crate::common::{create_api_server, ApiServer}; +use crate::common::{create_api_server, create_api_server_on_port, ApiServer}; use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, @@ -8,13 +8,17 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::*, + types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; +use execution_layer::test_utils::Operation; +use execution_layer::test_utils::TestingBuilder; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; +use http_api::{BlockId, StateId}; use lighthouse_network::{Enr, EnrExt, PeerId}; use network::NetworkMessage; +use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_slot_processing; @@ -25,8 +29,8 @@ use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ - AggregateSignature, BeaconState, BitList, Domain, EthSpec, Hash256, Keypair, MainnetEthSpec, - RelativeEpoch, SelectionProof, SignedRoot, Slot, + 
AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, Hash256, Keypair, + MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, }; type E = MainnetEthSpec; @@ -64,6 +68,7 @@ struct ApiTester { network_rx: mpsc::UnboundedReceiver>, local_enr: Enr, external_peer_id: PeerId, + mock_builder: Option>>, } impl ApiTester { @@ -74,13 +79,30 @@ impl ApiTester { Self::new_from_spec(spec).await } + pub async fn new_with_hard_forks(altair: bool, bellatrix: bool) -> Self { + let mut spec = E::default_spec(); + spec.shard_committee_period = 2; + // Set whether the chain has undergone each hard fork. + if altair { + spec.altair_fork_epoch = Some(Epoch::new(0)); + } + if bellatrix { + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + } + Self::new_from_spec(spec).await + } + pub async fn new_from_spec(spec: ChainSpec) -> Self { + // Get a random unused port + let port = unused_port::unused_tcp_port().unwrap(); + let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); + let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer() + .mock_execution_layer_with_builder(beacon_url.clone()) .build(), ); @@ -190,25 +212,28 @@ impl ApiTester { let ApiServer { server, - listening_socket, + listening_socket: _, shutdown_tx, network_rx, local_enr, external_peer_id, - } = create_api_server(chain.clone(), log).await; + } = create_api_server_on_port(chain.clone(), log, port).await; harness.runtime.task_executor.spawn(server, "api_server"); let client = BeaconNodeHttpClient::new( - SensitiveUrl::parse(&format!( - "http://{}:{}", - listening_socket.ip(), - listening_socket.port() - )) - .unwrap(), + beacon_url, Timeouts::set_all(Duration::from_secs(SECONDS_PER_SLOT)), ); + let builder_ref = harness.mock_builder.as_ref().unwrap().clone(); + harness.runtime.task_executor.spawn( + async move { builder_ref.run().await }, + 
"mock_builder_server", + ); + + let mock_builder = harness.mock_builder.clone(); + Self { harness, chain, @@ -224,6 +249,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_builder, } } @@ -306,6 +332,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_builder: None, } } @@ -313,6 +340,13 @@ impl ApiTester { &self.harness.validator_keypairs } + pub async fn new_mev_tester() -> Self { + Self::new_with_hard_forks(true, true) + .await + .test_post_validator_register_validator() + .await + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -325,99 +359,43 @@ impl ApiTester { fn interesting_state_ids(&self) -> Vec { let mut ids = vec![ - StateId::Head, - StateId::Genesis, - StateId::Finalized, - StateId::Justified, - StateId::Slot(Slot::new(0)), - StateId::Slot(Slot::new(32)), - StateId::Slot(Slot::from(SKIPPED_SLOTS[0])), - StateId::Slot(Slot::from(SKIPPED_SLOTS[1])), - StateId::Slot(Slot::from(SKIPPED_SLOTS[2])), - StateId::Slot(Slot::from(SKIPPED_SLOTS[3])), - StateId::Root(Hash256::zero()), + StateId(CoreStateId::Head), + StateId(CoreStateId::Genesis), + StateId(CoreStateId::Finalized), + StateId(CoreStateId::Justified), + StateId(CoreStateId::Slot(Slot::new(0))), + StateId(CoreStateId::Slot(Slot::new(32))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[0]))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[1]))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[2]))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[3]))), + StateId(CoreStateId::Root(Hash256::zero())), ]; - ids.push(StateId::Root( + ids.push(StateId(CoreStateId::Root( self.chain.canonical_head.cached_head().head_state_root(), - )); + ))); ids } fn interesting_block_ids(&self) -> Vec { let mut ids = vec![ - BlockId::Head, - BlockId::Genesis, - BlockId::Finalized, - BlockId::Justified, - BlockId::Slot(Slot::new(0)), - BlockId::Slot(Slot::new(32)), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[0])), - 
BlockId::Slot(Slot::from(SKIPPED_SLOTS[1])), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[2])), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[3])), - BlockId::Root(Hash256::zero()), + BlockId(CoreBlockId::Head), + BlockId(CoreBlockId::Genesis), + BlockId(CoreBlockId::Finalized), + BlockId(CoreBlockId::Justified), + BlockId(CoreBlockId::Slot(Slot::new(0))), + BlockId(CoreBlockId::Slot(Slot::new(32))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[0]))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[1]))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[2]))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[3]))), + BlockId(CoreBlockId::Root(Hash256::zero())), ]; - ids.push(BlockId::Root( + ids.push(BlockId(CoreBlockId::Root( self.chain.canonical_head.cached_head().head_block_root(), - )); + ))); ids } - - fn get_state(&self, state_id: StateId) -> Option> { - match state_id { - StateId::Head => Some( - self.chain - .head_snapshot() - .beacon_state - .clone_with_only_committee_caches(), - ), - StateId::Genesis => self - .chain - .get_state(&self.chain.genesis_state_root, None) - .unwrap(), - StateId::Finalized => { - let finalized_slot = self - .chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()); - - let root = self - .chain - .state_root_at_slot(finalized_slot) - .unwrap() - .unwrap(); - - self.chain.get_state(&root, Some(finalized_slot)).unwrap() - } - StateId::Justified => { - let justified_slot = self - .chain - .canonical_head - .cached_head() - .justified_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()); - - let root = self - .chain - .state_root_at_slot(justified_slot) - .unwrap() - .unwrap(); - - self.chain.get_state(&root, Some(justified_slot)).unwrap() - } - StateId::Slot(slot) => { - let root = self.chain.state_root_at_slot(slot).unwrap().unwrap(); - - self.chain.get_state(&root, Some(slot)).unwrap() - } - StateId::Root(root) => self.chain.get_state(&root, None).unwrap(), - 
} - } - pub async fn test_beacon_genesis(self) -> Self { let result = self.client.get_beacon_genesis().await.unwrap().data; @@ -437,39 +415,15 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_beacon_states_root(state_id) + .get_beacon_states_root(state_id.0) .await .unwrap() .map(|res| res.data.root); - let expected = match state_id { - StateId::Head => Some(self.chain.canonical_head.cached_head().head_state_root()), - StateId::Genesis => Some(self.chain.genesis_state_root), - StateId::Finalized => { - let finalized_slot = self - .chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()); - - self.chain.state_root_at_slot(finalized_slot).unwrap() - } - StateId::Justified => { - let justified_slot = self - .chain - .canonical_head - .cached_head() - .justified_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()); - - self.chain.state_root_at_slot(justified_slot).unwrap() - } - StateId::Slot(slot) => self.chain.state_root_at_slot(slot).unwrap(), - StateId::Root(root) => Some(root), - }; + let expected = state_id + .root(&self.chain) + .ok() + .map(|(root, _execution_optimistic)| root); assert_eq!(result, expected, "{:?}", state_id); } @@ -481,12 +435,12 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_beacon_states_fork(state_id) + .get_beacon_states_fork(state_id.0) .await .unwrap() .map(|res| res.data); - let expected = self.get_state(state_id).map(|state| state.fork()); + let expected = state_id.fork(&self.chain).ok(); assert_eq!(result, expected, "{:?}", state_id); } @@ -498,18 +452,20 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_beacon_states_finality_checkpoints(state_id) + .get_beacon_states_finality_checkpoints(state_id.0) .await .unwrap() .map(|res| res.data); - let expected = self - .get_state(state_id) - .map(|state| FinalityCheckpointsData { - 
previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }); + let expected = + state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }); assert_eq!(result, expected, "{:?}", state_id); } @@ -520,9 +476,9 @@ impl ApiTester { pub async fn test_beacon_states_validator_balances(self) -> Self { for state_id in self.interesting_state_ids() { for validator_indices in self.interesting_validator_indices() { - let state_opt = self.get_state(state_id); + let state_opt = state_id.state(&self.chain).ok(); let validators: Vec = match state_opt.as_ref() { - Some(state) => state.validators().clone().into(), + Some((state, _execution_optimistic)) => state.validators().clone().into(), None => vec![], }; let validator_index_ids = validator_indices @@ -545,7 +501,7 @@ impl ApiTester { let result_index_ids = self .client .get_beacon_states_validator_balances( - state_id, + state_id.0, Some(validator_index_ids.as_slice()), ) .await @@ -554,14 +510,14 @@ impl ApiTester { let result_pubkey_ids = self .client .get_beacon_states_validator_balances( - state_id, + state_id.0, Some(validator_pubkey_ids.as_slice()), ) .await .unwrap() .map(|res| res.data); - let expected = state_opt.map(|state| { + let expected = state_opt.map(|(state, _execution_optimistic)| { let mut validators = Vec::with_capacity(validator_indices.len()); for i in validator_indices { @@ -588,7 +544,10 @@ impl ApiTester { for state_id in self.interesting_state_ids() { for statuses in self.interesting_validator_statuses() { for validator_indices in self.interesting_validator_indices() { - let state_opt = self.get_state(state_id); + let state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, 
_execution_optimistic)| state); let validators: Vec = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -613,7 +572,7 @@ impl ApiTester { let result_index_ids = self .client .get_beacon_states_validators( - state_id, + state_id.0, Some(validator_index_ids.as_slice()), None, ) @@ -624,7 +583,7 @@ impl ApiTester { let result_pubkey_ids = self .client .get_beacon_states_validators( - state_id, + state_id.0, Some(validator_pubkey_ids.as_slice()), None, ) @@ -675,7 +634,10 @@ impl ApiTester { pub async fn test_beacon_states_validator_id(self) -> Self { for state_id in self.interesting_state_ids() { - let state_opt = self.get_state(state_id); + let state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); let validators = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -690,7 +652,7 @@ impl ApiTester { for validator_id in validator_ids { let result = self .client - .get_beacon_states_validator_id(state_id, validator_id) + .get_beacon_states_validator_id(state_id.0, validator_id) .await .unwrap() .map(|res| res.data); @@ -727,12 +689,15 @@ impl ApiTester { pub async fn test_beacon_states_committees(self) -> Self { for state_id in self.interesting_state_ids() { - let mut state_opt = self.get_state(state_id); + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let results = self .client - .get_beacon_states_committees(state_id, None, None, epoch_opt) + .get_beacon_states_committees(state_id.0, None, None, epoch_opt) .await .unwrap() .map(|res| res.data); @@ -769,37 +734,6 @@ impl ApiTester { self } - fn get_block_root(&self, block_id: BlockId) -> Option { - match block_id { - BlockId::Head => Some(self.chain.canonical_head.cached_head().head_block_root()), - BlockId::Genesis => Some(self.chain.genesis_block_root), - 
BlockId::Finalized => Some( - self.chain - .canonical_head - .cached_head() - .finalized_checkpoint() - .root, - ), - BlockId::Justified => Some( - self.chain - .canonical_head - .cached_head() - .justified_checkpoint() - .root, - ), - BlockId::Slot(slot) => self - .chain - .block_root_at_slot(slot, WhenSlotSkipped::None) - .unwrap(), - BlockId::Root(root) => Some(root), - } - } - - async fn get_block(&self, block_id: BlockId) -> Option> { - let root = self.get_block_root(block_id)?; - self.chain.get_block(&root).await.unwrap() - } - pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -877,14 +811,17 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_headers_block_id(block_id) + .get_beacon_headers_block_id(block_id.0) .await .unwrap() .map(|res| res.data); - let block_root_opt = self.get_block_root(block_id); + let block_root_opt = block_id + .root(&self.chain) + .ok() + .map(|(root, _execution_optimistic)| root); - if let BlockId::Slot(slot) = block_id { + if let CoreBlockId::Slot(slot) = block_id.0 { if block_root_opt.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -892,11 +829,11 @@ impl ApiTester { } } - let block_opt = if let Some(root) = block_root_opt { - self.chain.get_block(&root).await.unwrap() - } else { - None - }; + let block_opt = block_id + .full_block(&self.chain) + .await + .ok() + .map(|(block, _execution_optimistic)| block); if block_opt.is_none() && result.is_none() { continue; @@ -934,13 +871,16 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_blocks_root(block_id) + .get_beacon_blocks_root(block_id.0) .await .unwrap() .map(|res| res.data.root); - let expected = self.get_block_root(block_id); - if let BlockId::Slot(slot) = block_id { + let expected = block_id + .root(&self.chain) + .ok() + .map(|(root, _execution_optimistic)| root); + if 
let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -982,9 +922,13 @@ impl ApiTester { pub async fn test_beacon_blocks(self) -> Self { for block_id in self.interesting_block_ids() { - let expected = self.get_block(block_id).await; + let expected = block_id + .full_block(&self.chain) + .await + .ok() + .map(|(block, _execution_optimistic)| block); - if let BlockId::Slot(slot) = block_id { + if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -993,10 +937,10 @@ impl ApiTester { } // Check the JSON endpoint. - let json_result = self.client.get_beacon_blocks(block_id).await.unwrap(); + let json_result = self.client.get_beacon_blocks(block_id.0).await.unwrap(); if let (Some(json), Some(expected)) = (&json_result, &expected) { - assert_eq!(json.data, *expected, "{:?}", block_id); + assert_eq!(&json.data, expected.as_ref(), "{:?}", block_id); assert_eq!( json.version, Some(expected.fork_name(&self.chain.spec).unwrap()) @@ -1009,23 +953,28 @@ impl ApiTester { // Check the SSZ endpoint. let ssz_result = self .client - .get_beacon_blocks_ssz(block_id, &self.chain.spec) + .get_beacon_blocks_ssz(block_id.0, &self.chain.spec) .await .unwrap(); - assert_eq!(ssz_result, expected, "{:?}", block_id); + assert_eq!( + ssz_result.as_ref(), + expected.as_ref().map(|b| b.as_ref()), + "{:?}", + block_id + ); // Check that the legacy v1 API still works but doesn't return a version field. 
- let v1_result = self.client.get_beacon_blocks_v1(block_id).await.unwrap(); + let v1_result = self.client.get_beacon_blocks_v1(block_id.0).await.unwrap(); if let (Some(v1_result), Some(expected)) = (&v1_result, &expected) { assert_eq!(v1_result.version, None); - assert_eq!(v1_result.data, *expected); + assert_eq!(&v1_result.data, expected.as_ref()); } else { assert_eq!(v1_result, None); assert_eq!(expected, None); } // Check that version headers are provided. - let url = self.client.get_beacon_blocks_path(block_id).unwrap(); + let url = self.client.get_beacon_blocks_path(block_id.0).unwrap(); let builders: Vec RequestBuilder> = vec![ |b| b, @@ -1060,17 +1009,18 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_blocks_attestations(block_id) + .get_beacon_blocks_attestations(block_id.0) .await .unwrap() .map(|res| res.data); - let expected = self - .get_block(block_id) - .await - .map(|block| block.message().body().attestations().clone().into()); + let expected = block_id.full_block(&self.chain).await.ok().map( + |(block, _execution_optimistic)| { + block.message().body().attestations().clone().into() + }, + ); - if let BlockId::Slot(slot) = block_id { + if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -1350,6 +1300,7 @@ impl ApiTester { let expected = SyncingData { is_syncing: false, + is_optimistic: false, head_slot, sync_distance, }; @@ -1473,9 +1424,16 @@ impl ApiTester { pub async fn test_get_debug_beacon_states(self) -> Self { for state_id in self.interesting_state_ids() { - let result_json = self.client.get_debug_beacon_states(state_id).await.unwrap(); + let result_json = self + .client + .get_debug_beacon_states(state_id.0) + .await + .unwrap(); - let mut expected = self.get_state(state_id); + let mut expected = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); 
expected.as_mut().map(|state| state.drop_all_caches()); if let (Some(json), Some(expected)) = (&result_json, &expected) { @@ -1492,7 +1450,7 @@ impl ApiTester { // Check SSZ API. let result_ssz = self .client - .get_debug_beacon_states_ssz(state_id, &self.chain.spec) + .get_debug_beacon_states_ssz(state_id.0, &self.chain.spec) .await .unwrap(); assert_eq!(result_ssz, expected, "{:?}", state_id); @@ -1500,7 +1458,7 @@ impl ApiTester { // Check legacy v1 API. let result_v1 = self .client - .get_debug_beacon_states_v1(state_id) + .get_debug_beacon_states_v1(state_id.0) .await .unwrap(); @@ -1513,7 +1471,10 @@ impl ApiTester { } // Check that version headers are provided. - let url = self.client.get_debug_beacon_states_path(state_id).unwrap(); + let url = self + .client + .get_debug_beacon_states_path(state_id.0) + .unwrap(); let builders: Vec RequestBuilder> = vec![|b| b, |b| b.accept(Accept::Ssz)]; @@ -1791,6 +1752,7 @@ impl ApiTester { let expected = DutiesResponse { data: expected_duties, + execution_optimistic: Some(false), dependent_root, }; @@ -2062,6 +2024,175 @@ impl ApiTester { self } + pub async fn test_blinded_block_production>(&self) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = 
epoch.signing_root(domain); + sk.sign(message).into() + }; + + let block = self + .client + .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .await + .unwrap() + .data; + + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blinded_blocks(&signed_block) + .await + .unwrap(); + + // This converts the generic `Payload` to a concrete type for comparison. + let head_block = SignedBeaconBlock::from(signed_block.clone()); + assert_eq!(head_block, signed_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + } + + pub async fn test_blinded_block_production_no_verify_randao>( + self, + ) -> Self { + for _ in 0..E::slots_per_epoch() { + let slot = self.chain.slot().unwrap(); + + let block = self + .client + .get_validator_blinded_blocks_with_verify_randao::( + slot, + None, + None, + Some(false), + ) + .await + .unwrap() + .data; + assert_eq!(block.slot(), slot); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + + pub async fn test_blinded_block_production_verify_randao_invalid>( + self, + ) -> Self { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let bad_randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = (epoch + 1).signing_root(domain); + 
sk.sign(message).into() + }; + + // Check failure with no `verify_randao` passed. + self.client + .get_validator_blinded_blocks::(slot, &bad_randao_reveal, None) + .await + .unwrap_err(); + + // Check failure with `verify_randao=true`. + self.client + .get_validator_blinded_blocks_with_verify_randao::( + slot, + Some(&bad_randao_reveal), + None, + Some(true), + ) + .await + .unwrap_err(); + + // Check failure with no randao reveal provided. + self.client + .get_validator_blinded_blocks_with_verify_randao::( + slot, None, None, None, + ) + .await + .unwrap_err(); + + // Check success with `verify_randao=false`. + let block = self + .client + .get_validator_blinded_blocks_with_verify_randao::( + slot, + Some(&bad_randao_reveal), + None, + Some(false), + ) + .await + .unwrap() + .data; + + assert_eq!(block.slot(), slot); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + pub async fn test_get_validator_attestation_data(self) -> Self { let mut state = self.chain.head_beacon_state_cloned(); let slot = state.slot(); @@ -2260,7 +2391,14 @@ impl ApiTester { let mut registrations = vec![]; let mut fee_recipients = vec![]; - let fork = self.chain.head_snapshot().beacon_state.fork(); + let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch()); + let fork = Fork { + current_version: self.chain.spec.genesis_fork_version, + previous_version: self.chain.spec.genesis_fork_version, + epoch: genesis_epoch, + }; + + let expected_gas_limit = 11_111_111; for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { let pubkey = keypair.pk.compress(); @@ -2268,12 +2406,13 @@ impl ApiTester { let data = ValidatorRegistrationData { fee_recipient, - gas_limit: 0, + gas_limit: expected_gas_limit, timestamp: 0, pubkey, }; + let domain = self.chain.spec.get_domain( - Epoch::new(0), + genesis_epoch, Domain::ApplicationMask(ApplicationDomain::Builder), &fork, Hash256::zero(), @@ -2281,11 +2420,13 @@ impl ApiTester { let message = 
data.signing_root(domain); let signature = keypair.sk.sign(message); - fee_recipients.push(fee_recipient); - registrations.push(SignedValidatorRegistrationData { + let signed = SignedValidatorRegistrationData { message: data, signature, - }); + }; + + fee_recipients.push(fee_recipient); + registrations.push(signed); } self.client @@ -2315,6 +2456,594 @@ impl ApiTester { self } + // Helper function for tests that require a valid RANDAO signature. + async fn get_test_randao(&self, slot: Slot, epoch: Epoch) -> (u64, SignatureBytes) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + let (proposer_pubkey_bytes, proposer_index) = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| (duty.pubkey, duty.validator_index)) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = + self.chain + .spec + .get_domain(epoch, Domain::Randao, &fork, genesis_validators_root); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + (proposer_index, randao_reveal) + } + + pub async fn test_payload_respects_registration(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + 
assert_eq!(payload.execution_payload_header.gas_limit, 11_111_111); + + // If this cache is empty, it indicates fallback was not used, so the payload came from the + // mock builder. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_accepts_mutated_gas_limit(self) -> Self { + // Mutate gas limit. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::GasLimit(30_000_000)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + assert_eq!(payload.execution_payload_header.gas_limit, 30_000_000); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_accepts_changed_fee_recipient(self) -> Self { + let test_fee_recipient = "0x4242424242424242424242424242424242424242" + .parse::
() + .unwrap(); + + // Mutate fee recipient. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::FeeRecipient(test_fee_recipient)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.fee_recipient, + test_fee_recipient + ); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_rejects_invalid_parent_hash(self) -> Self { + let invalid_parent_hash = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate parent hash. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::ParentHash(invalid_parent_hash)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_parent_hash = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_hash; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.parent_hash, + expected_parent_hash + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_prev_randao(self) -> Self { + let invalid_prev_randao = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate prev randao. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::PrevRandao(invalid_prev_randao)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_prev_randao = self + .chain + .canonical_head + .cached_head() + .head_random() + .unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.prev_randao, + expected_prev_randao + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_block_number(self) -> Self { + let invalid_block_number = 2; + + // Mutate block number. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::BlockNumber(invalid_block_number)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_block_number = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_number + + 1; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.block_number, + expected_block_number + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_timestamp(self) -> Self { + let invalid_timestamp = 2; + + // Mutate timestamp. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Timestamp(invalid_timestamp)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let min_expected_timestamp = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .timestamp; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert!(payload.execution_payload_header.timestamp > min_expected_timestamp); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_signature(self) -> Self { + self.mock_builder + .as_ref() + .unwrap() + .builder + .invalid_signatures(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_chain_health_skips(self) -> Self { + let slot = self.chain.slot().unwrap(); + + // Since we are proposing this slot, start the count from the previous slot. + let prev_slot = slot - Slot::new(1); + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + let epoch = self.chain.epoch().unwrap(); + + // Inclusive here to make sure we advance one slot past the threshold. + for _ in (prev_slot - head_slot).as_usize()..=self.chain.config.builder_fallback_skips { + self.harness.advance_slot(); + } + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_chain_health_skips_per_epoch(self) -> Self { + // Fill an epoch with `builder_fallback_skips_per_epoch` skip slots. + for i in 0..E::slots_per_epoch() { + if i == 0 || i as usize > self.chain.config.builder_fallback_skips_per_epoch { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + } + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + + // Without proposing, advance into the next slot, this should make us cross the threshold + // number of skips, causing us to use the fallback. + self.harness.advance_slot(); + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + self + } + + pub async fn test_builder_chain_health_epochs_since_finalization(self) -> Self { + let skips = E::slots_per_epoch() + * self.chain.config.builder_fallback_epochs_since_finalization as u64; + + for _ in 0..skips { + self.harness.advance_slot(); + } + + // Fill the next epoch with blocks, should be enough to justify, not finalize. + for _ in 0..E::slots_per_epoch() { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this + // scenario starts at an epoch boundary). 
+ for _ in 0..E::slots_per_epoch() + 1 { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -2391,11 +3120,14 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_lighthouse_beacon_states_ssz(&state_id, &self.chain.spec) + .get_lighthouse_beacon_states_ssz(&state_id.0, &self.chain.spec) .await .unwrap(); - let mut expected = self.get_state(state_id); + let mut expected = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); expected.as_mut().map(|state| state.drop_all_caches()); assert_eq!(result, expected, "{:?}", state_id); @@ -2562,6 +3294,7 @@ impl ApiTester { let expected_block = EventKind::Block(SseBlock { block: block_root, slot: next_slot, + execution_optimistic: false, }); let expected_head = EventKind::Head(SseHead { @@ -2575,6 +3308,7 @@ impl ApiTester { .unwrap() .unwrap(), epoch_transition: true, + execution_optimistic: false, }); let finalized_block_root = self @@ -2593,6 +3327,7 @@ impl ApiTester { block: finalized_block_root, state: finalized_state_root, epoch: Epoch::new(3), + execution_optimistic: false, }); self.client @@ -2621,6 +3356,7 @@ impl ApiTester { 
new_head_block: self.reorg_block.canonical_root(), new_head_state: self.reorg_block.state_root(), epoch: self.next_block.slot().epoch(E::slots_per_epoch()), + execution_optimistic: false, }); self.client @@ -2687,6 +3423,7 @@ impl ApiTester { let expected_block = EventKind::Block(SseBlock { block: block_root, slot: next_slot, + execution_optimistic: false, }); let expected_head = EventKind::Head(SseHead { @@ -2696,6 +3433,7 @@ impl ApiTester { current_duty_dependent_root: self.chain.genesis_block_root, previous_duty_dependent_root: self.chain.genesis_block_root, epoch_transition: false, + execution_optimistic: false, }); self.client @@ -2708,6 +3446,40 @@ impl ApiTester { self } + + pub async fn test_check_optimistic_responses(&mut self) { + // Check responses are not optimistic. + let result = self + .client + .get_beacon_headers_block_id(CoreBlockId::Head) + .await + .unwrap() + .unwrap(); + + assert_eq!(result.execution_optimistic, Some(false)); + + // Change head to be optimistic. + self.chain + .canonical_head + .fork_choice_write_lock() + .proto_array_mut() + .core_proto_array_mut() + .nodes + .last_mut() + .map(|head_node| { + head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) + }); + + // Check responses are now optimistic. 
+ let result = self + .client + .get_beacon_headers_block_id(CoreBlockId::Head) + .await + .unwrap() + .unwrap(); + + assert_eq!(result.execution_optimistic, Some(true)); + } } async fn poll_events, eth2::Error>> + Unpin, T: EthSpec>( @@ -2990,6 +3762,72 @@ async fn block_production_verify_randao_invalid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_with_skip_slots_full_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_no_verify_randao_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_no_verify_randao::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_verify_randao_invalid_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_verify_randao_invalid::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_no_verify_randao::>() + .await; +} + +#[tokio::test(flavor = 
"multi_thread", worker_threads = 2)] +async fn blinded_block_production_verify_randao_invalid_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_verify_randao_invalid::>() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_attestation_data() { ApiTester::new() @@ -3074,6 +3912,94 @@ async fn post_validator_register_validator() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_valid() { + ApiTester::new_mev_tester() + .await + .test_payload_respects_registration() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_gas_limit_mutation() { + ApiTester::new_mev_tester() + .await + .test_payload_accepts_mutated_gas_limit() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_fee_recipient_mutation() { + ApiTester::new_mev_tester() + .await + .test_payload_accepts_changed_fee_recipient() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_parent_hash() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_parent_hash() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_prev_randao() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_prev_randao() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_block_number() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_block_number() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_timestamp() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_timestamp() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn 
get_blinded_block_invalid_signature() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_signature() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_skips() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips_per_epoch() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_skips_per_epoch() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_epochs_since_finalization() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_epochs_since_finalization() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() @@ -3105,3 +4031,11 @@ async fn lighthouse_endpoints() { .test_post_lighthouse_liveness() .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn optimistic_responses() { + ApiTester::new_with_hard_forks(true, true) + .await + .test_check_optimistic_responses() + .await; +} diff --git a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs b/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs index 93687e555b..4842605f7a 100644 --- a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs @@ -30,9 +30,9 @@ pub struct GossipCache { proposer_slashing: Option, /// Timeout for attester slashings. attester_slashing: Option, - /// Timeout for aggregated sync commitee signatures. + /// Timeout for aggregated sync committee signatures. signed_contribution_and_proof: Option, - /// Timeout for sync commitee messages. + /// Timeout for sync committee messages. 
sync_committee_message: Option, } @@ -51,9 +51,9 @@ pub struct GossipCacheBuilder { proposer_slashing: Option, /// Timeout for attester slashings. attester_slashing: Option, - /// Timeout for aggregated sync commitee signatures. + /// Timeout for aggregated sync committee signatures. signed_contribution_and_proof: Option, - /// Timeout for sync commitee messages. + /// Timeout for sync committee messages. sync_committee_message: Option, } @@ -101,13 +101,13 @@ impl GossipCacheBuilder { self } - /// Timeout for aggregated sync commitee signatures. + /// Timeout for aggregated sync committee signatures. pub fn signed_contribution_and_proof_timeout(mut self, timeout: Duration) -> Self { self.signed_contribution_and_proof = Some(timeout); self } - /// Timeout for sync commitee messages. + /// Timeout for sync committee messages. pub fn sync_committee_message_timeout(mut self, timeout: Duration) -> Self { self.sync_committee_message = Some(timeout); self diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4b2b81060f..55b3884454 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -481,7 +481,15 @@ impl PeerManager { // implement a new sync type which tracks these peers and prevents the sync // algorithms from requesting blocks from them (at least for a set period of // time, multiple failures would then lead to a ban). - PeerAction::Fatal + + match direction { + // If the blocks request was initiated by us, then we have no use of this + // peer and so we ban it. + ConnectionDirection::Outgoing => PeerAction::Fatal, + // If the blocks request was initiated by the peer, then we let the peer decide if + // it wants to continue talking to us, we do not ban the peer. 
+ ConnectionDirection::Incoming => return, + } } RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError, RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError, diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 6273356b8f..555266d0e2 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -477,7 +477,7 @@ pub enum ConnectionDirection { } /// Connection Status of the peer. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub enum PeerConnectionStatus { /// The peer is connected. Connected { @@ -507,6 +507,7 @@ pub enum PeerConnectionStatus { since: Instant, }, /// The connection status has not been specified. + #[default] Unknown, } @@ -561,9 +562,3 @@ impl Serialize for PeerConnectionStatus { } } } - -impl Default for PeerConnectionStatus { - fn default() -> Self { - PeerConnectionStatus::Unknown - } -} diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 3dd7ad8470..825b1088b2 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -78,18 +78,13 @@ impl std::fmt::Display for GossipKind { } /// The known encoding types for gossipsub messages. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Default)] pub enum GossipEncoding { /// Messages are encoded with SSZSnappy. 
+ #[default] SSZSnappy, } -impl Default for GossipEncoding { - fn default() -> Self { - GossipEncoding::SSZSnappy - } -} - impl GossipTopic { pub fn new(kind: GossipKind, encoding: GossipEncoding, fork_digest: [u8; 4]) -> Self { GossipTopic { diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 6f75e1fb23..a08f34f707 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -66,7 +66,7 @@ use types::{ SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedUnaggregate, ReadyWork, + spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, }; use worker::{Toolbox, Worker}; @@ -75,7 +75,7 @@ mod tests; mod work_reprocessing_queue; mod worker; -use crate::beacon_processor::work_reprocessing_queue::QueuedBlock; +use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; pub use worker::{ ChainSegmentProcessId, FailureMode, GossipAggregatePackage, GossipAttestationPackage, }; @@ -501,6 +501,7 @@ impl WorkEvent { block, seen_timestamp, process_type, + should_process: true, }, } } @@ -565,7 +566,7 @@ impl WorkEvent { impl std::convert::From> for WorkEvent { fn from(ready_work: ReadyWork) -> Self { match ready_work { - ReadyWork::Block(QueuedBlock { + ReadyWork::Block(QueuedGossipBlock { peer_id, block, seen_timestamp, @@ -577,6 +578,20 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::RpcBlock(QueuedRpcBlock { + block, + seen_timestamp, + process_type, + should_process, + }) => Self { + drop_during_sync: false, + work: Work::RpcBlock { + block, + seen_timestamp, + process_type, + should_process, + }, + }, ReadyWork::Unaggregate(QueuedUnaggregate { peer_id, message_id, @@ -695,6 +710,7 @@ pub enum Work { block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, + should_process: bool, }, ChainSegment 
{ process_id: ChainSegmentProcessId, @@ -1521,12 +1537,14 @@ impl BeaconProcessor { block, seen_timestamp, process_type, + should_process, } => task_spawner.spawn_async(worker.process_rpc_block( block, seen_timestamp, process_type, work_reprocessing_tx, duplicate_cache, + should_process, )), /* * Verification for a chain segment (multiple blocks). diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index a39ca2ec33..05854ac1e2 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -1,7 +1,9 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. #![cfg(test)] -use crate::beacon_processor::work_reprocessing_queue::QUEUED_ATTESTATION_DELAY; +use crate::beacon_processor::work_reprocessing_queue::{ + QUEUED_ATTESTATION_DELAY, QUEUED_RPC_BLOCK_DELAY, +}; use crate::beacon_processor::*; use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::test_utils::{ @@ -54,6 +56,7 @@ struct TestRig { work_journal_rx: mpsc::Receiver<&'static str>, _network_rx: mpsc::UnboundedReceiver>, _sync_rx: mpsc::UnboundedReceiver>, + duplicate_cache: DuplicateCache, _harness: BeaconChainHarness, } @@ -185,6 +188,7 @@ impl TestRig { let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364); + let duplicate_cache = DuplicateCache::default(); BeaconProcessor { beacon_chain: Arc::downgrade(&chain), network_tx, @@ -193,7 +197,7 @@ impl TestRig { executor, max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, - importing_blocks: Default::default(), + importing_blocks: duplicate_cache.clone(), log: log.clone(), } .spawn_manager(beacon_processor_rx, Some(work_journal_tx)); @@ -211,12 +215,13 @@ impl TestRig { work_journal_rx, _network_rx, _sync_rx, + duplicate_cache, _harness: harness, } } pub async fn recompute_head(&self) { - self.chain.recompute_head_at_current_slot().await.unwrap() + 
self.chain.recompute_head_at_current_slot().await } pub fn head_root(&self) -> Hash256 { @@ -246,6 +251,15 @@ impl TestRig { self.beacon_processor_tx.try_send(event).unwrap(); } + pub fn enqueue_single_lookup_rpc_block(&self) { + let event = WorkEvent::rpc_beacon_block( + self.next_block.clone(), + std::time::Duration::default(), + BlockProcessType::SingleBlock { id: 1 }, + ); + self.beacon_processor_tx.try_send(event).unwrap(); + } + pub fn enqueue_unaggregated_attestation(&self) { let (attestation, subnet_id) = self.attestations.first().unwrap().clone(); self.beacon_processor_tx @@ -828,3 +842,33 @@ async fn import_misc_gossip_ops() { "op pool should have one more exit" ); } + +/// Ensure that rpc block going to the reprocessing queue flow +/// works when the duplicate cache handle is held by another task. +#[tokio::test] +async fn test_rpc_block_reprocessing() { + let mut rig = TestRig::new(SMALL_CHAIN).await; + let next_block_root = rig.next_block.canonical_root(); + // Insert the next block into the duplicate cache manually + let handle = rig.duplicate_cache.check_and_insert(next_block_root); + rig.enqueue_single_lookup_rpc_block(); + + rig.assert_event_journal(&[RPC_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; + // next_block shouldn't be processed since it couldn't get the + // duplicate cache handle + assert_ne!(next_block_root, rig.head_root()); + + drop(handle); + + // The block should arrive at the beacon processor again after + // the specified delay. + tokio::time::sleep(QUEUED_RPC_BLOCK_DELAY).await; + + rig.assert_event_journal(&[RPC_BLOCK]).await; + // Add an extra delay for block processing + tokio::time::sleep(Duration::from_millis(10)).await; + // head should update to next block now since the duplicate + // cache handle was dropped. 
+ assert_eq!(next_block_root, rig.head_root()); +} diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 33c15cf06b..efe8d3bf12 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -12,6 +12,7 @@ //! block will be re-queued until their block is imported, or until they expire. use super::MAX_SCHEDULED_WORK_QUEUE_LEN; use crate::metrics; +use crate::sync::manager::BlockProcessType; use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use fnv::FnvHashMap; use futures::task::Poll; @@ -22,16 +23,18 @@ use slog::{crit, debug, error, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::pin::Pin; +use std::sync::Arc; use std::task::Context; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SubnetId}; +use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; -const BLOCKS: &str = "blocks"; +const GOSSIP_BLOCKS: &str = "gossip_blocks"; +const RPC_BLOCKS: &str = "rpc_blocks"; const ATTESTATIONS: &str = "attestations"; /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. @@ -41,6 +44,9 @@ const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); +/// For how long to queue rpc blocks before sending them back for reprocessing. 
+pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); + /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. The fact that /// we signature-verify blocks before putting them in the queue *should* protect against this, but /// it's nice to have extra protection. @@ -52,7 +58,10 @@ const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; /// Messages that the scheduler can receive. pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. - EarlyBlock(QueuedBlock), + EarlyBlock(QueuedGossipBlock), + /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same + /// hash until the gossip block is imported. + RpcBlock(QueuedRpcBlock), /// A block that was successfully processed. We use this to handle attestations for unknown /// blocks. BlockImported(Hash256), @@ -64,7 +73,8 @@ pub enum ReprocessQueueMessage { /// Events sent by the scheduler once they are ready for re-processing. pub enum ReadyWork { - Block(QueuedBlock), + Block(QueuedGossipBlock), + RpcBlock(QueuedRpcBlock), Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), } @@ -90,16 +100,30 @@ pub struct QueuedAggregate { } /// A block that arrived early and has been queued for later import. -pub struct QueuedBlock { +pub struct QueuedGossipBlock { pub peer_id: PeerId, pub block: Box>, pub seen_timestamp: Duration, } +/// A block that arrived for processing when the same block was being imported over gossip. +/// It is queued for later import. +pub struct QueuedRpcBlock { + pub block: Arc>, + pub process_type: BlockProcessType, + pub seen_timestamp: Duration, + /// Indicates if the beacon chain should process this block or not. + /// We use this to ignore block processing when rpc block queues are full. + pub should_process: bool, +} + /// Unifies the different messages processed by the block delay queue. 
enum InboundEvent { - /// A block that was queued for later processing and is ready for import. - ReadyBlock(QueuedBlock), + /// A gossip block that was queued for later processing and is ready for import. + ReadyGossipBlock(QueuedGossipBlock), + /// A rpc block that was queued because the same gossip block was being imported + /// will now be retried for import. + ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. ReadyAttestation(QueuedAttestationId), /// A `DelayQueue` returned an error. @@ -117,13 +141,15 @@ struct ReprocessQueue { /* Queues */ /// Queue to manage scheduled early blocks. - block_delay_queue: DelayQueue>, + gossip_block_delay_queue: DelayQueue>, + /// Queue to manage scheduled early blocks. + rpc_block_delay_queue: DelayQueue>, /// Queue to manage scheduled attestations. attestations_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. - queued_block_roots: HashSet, + queued_gossip_block_roots: HashSet, /// Queued aggregated attestations. queued_aggregates: FnvHashMap, DelayKey)>, /// Queued attestations. @@ -135,6 +161,7 @@ struct ReprocessQueue { /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, early_block_debounce: TimeLatch, + rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, } @@ -167,12 +194,26 @@ impl Stream for ReprocessQueue { // // The sequential nature of blockchains means it is generally better to try and import all // existing blocks before new ones. 
- match self.block_delay_queue.poll_expired(cx) { + match self.gossip_block_delay_queue.poll_expired(cx) { Poll::Ready(Some(Ok(queued_block))) => { - return Poll::Ready(Some(InboundEvent::ReadyBlock(queued_block.into_inner()))); + return Poll::Ready(Some(InboundEvent::ReadyGossipBlock( + queued_block.into_inner(), + ))); } Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "block_queue"))); + return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "gossip_block_queue"))); + } + // `Poll::Ready(None)` means that there are no more entries in the delay queue and we + // will continue to get this result until something else is added into the queue. + Poll::Ready(None) | Poll::Pending => (), + } + + match self.rpc_block_delay_queue.poll_expired(cx) { + Poll::Ready(Some(Ok(queued_block))) => { + return Poll::Ready(Some(InboundEvent::ReadyRpcBlock(queued_block.into_inner()))); + } + Poll::Ready(Some(Err(e))) => { + return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "rpc_block_queue"))); } // `Poll::Ready(None)` means that there are no more entries in the delay queue and we // will continue to get this result until something else is added into the queue. 
@@ -219,14 +260,16 @@ pub fn spawn_reprocess_scheduler( let mut queue = ReprocessQueue { work_reprocessing_rx, ready_work_tx, - block_delay_queue: DelayQueue::new(), + gossip_block_delay_queue: DelayQueue::new(), + rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), - queued_block_roots: HashSet::new(), + queued_gossip_block_roots: HashSet::new(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), next_attestation: 0, early_block_debounce: TimeLatch::default(), + rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), }; @@ -259,13 +302,13 @@ impl ReprocessQueue { let block_root = early_block.block.block_root; // Don't add the same block to the queue twice. This prevents DoS attacks. - if self.queued_block_roots.contains(&block_root) { + if self.queued_gossip_block_roots.contains(&block_root) { return; } if let Some(duration_till_slot) = slot_clock.duration_to_slot(block_slot) { // Check to ensure this won't over-fill the queue. - if self.queued_block_roots.len() >= MAXIMUM_QUEUED_BLOCKS { + if self.queued_gossip_block_roots.len() >= MAXIMUM_QUEUED_BLOCKS { if self.early_block_debounce.elapsed() { warn!( log, @@ -278,10 +321,10 @@ impl ReprocessQueue { return; } - self.queued_block_roots.insert(block_root); + self.queued_gossip_block_roots.insert(block_root); // Queue the block until the start of the appropriate slot, plus // `ADDITIONAL_QUEUED_BLOCK_DELAY`. - self.block_delay_queue.insert( + self.gossip_block_delay_queue.insert( early_block, duration_till_slot + ADDITIONAL_QUEUED_BLOCK_DELAY, ); @@ -311,6 +354,58 @@ impl ReprocessQueue { } } } + // A rpc block arrived for processing at the same time when a gossip block + // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY` + // and then send the rpc block back for processing assuming the gossip import + // has completed by then. 
+ InboundEvent::Msg(RpcBlock(mut rpc_block)) => { + // Check to ensure this won't over-fill the queue. + if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { + if self.rpc_block_debounce.elapsed() { + warn!( + log, + "RPC blocks queue is full"; + "queue_size" => MAXIMUM_QUEUED_BLOCKS, + "msg" => "check system clock" + ); + } + // Return the block to the beacon processor signalling to + // ignore processing for this block + rpc_block.should_process = false; + if self + .ready_work_tx + .try_send(ReadyWork::RpcBlock(rpc_block)) + .is_err() + { + error!( + log, + "Failed to send rpc block to beacon processor"; + ); + } + return; + } + + // Queue the block for 1/4th of a slot + self.rpc_block_delay_queue + .insert(rpc_block, QUEUED_RPC_BLOCK_DELAY); + } + InboundEvent::ReadyRpcBlock(queued_rpc_block) => { + debug!( + log, + "Sending rpc block for reprocessing"; + "block_root" => %queued_rpc_block.block.canonical_root() + ); + if self + .ready_work_tx + .try_send(ReadyWork::RpcBlock(queued_rpc_block)) + .is_err() + { + error!( + log, + "Failed to send rpc block to beacon processor"; + ); + } + } InboundEvent::Msg(UnknownBlockAggregate(queued_aggregate)) => { if self.attestations_delay_queue.len() >= MAXIMUM_QUEUED_ATTESTATIONS { if self.attestation_delay_debounce.elapsed() { @@ -423,10 +518,10 @@ impl ReprocessQueue { } } // A block that was queued for later processing is now ready to be processed. - InboundEvent::ReadyBlock(ready_block) => { + InboundEvent::ReadyGossipBlock(ready_block) => { let block_root = ready_block.block.block_root; - if !self.queued_block_roots.remove(&block_root) { + if !self.queued_gossip_block_roots.remove(&block_root) { // Log an error to alert that we've made a bad assumption about how this // program works, but still process the block anyway. 
error!( @@ -499,8 +594,13 @@ impl ReprocessQueue { metrics::set_gauge_vec( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, - &[BLOCKS], - self.block_delay_queue.len() as i64, + &[GOSSIP_BLOCKS], + self.gossip_block_delay_queue.len() as i64, + ); + metrics::set_gauge_vec( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, + &[RPC_BLOCKS], + self.rpc_block_delay_queue.len() as i64, ); metrics::set_gauge_vec( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 3586050e16..b007cba4c6 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -6,7 +6,7 @@ use beacon_chain::{ observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, ForkChoiceError, + BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, GossipVerifiedBlock, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; @@ -25,7 +25,7 @@ use types::{ use super::{ super::work_reprocessing_queue::{ - QueuedAggregate, QueuedBlock, QueuedUnaggregate, ReprocessQueueMessage, + QueuedAggregate, QueuedGossipBlock, QueuedUnaggregate, ReprocessQueueMessage, }, Worker, }; @@ -789,9 +789,7 @@ impl Worker { return None; } // TODO(merge): reconsider peer scoring for this event. 
- Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) - | Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::UnverifiedNonOptimisticCandidate)) - | Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection)) => { + Err(ref e @BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -870,7 +868,7 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_REQUEUED_TOTAL); if reprocess_tx - .try_send(ReprocessQueueMessage::EarlyBlock(QueuedBlock { + .try_send(ReprocessQueueMessage::EarlyBlock(QueuedGossipBlock { peer_id, block: Box::new(verified_block), seen_timestamp: seen_duration, @@ -915,7 +913,11 @@ impl Worker { ) { let block: Arc<_> = verified_block.block.clone(); - match self.chain.process_block(verified_block).await { + match self + .chain + .process_block(verified_block, CountUnrealized::True) + .await + { Ok(block_root) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); @@ -938,21 +940,7 @@ impl Worker { "peer_id" => %peer_id ); - if let Err(e) = self.chain.recompute_head_at_current_slot().await { - error!( - self.log, - "Fork choice failed"; - "error" => ?e, - "location" => "block_gossip" - ) - } else { - debug!( - self.log, - "Fork choice success"; - "block" => ?block_root, - "location" => "block_gossip" - ) - } + self.chain.recompute_head_at_current_slot().await; } Err(BlockError::ParentUnknown { .. 
}) => { // Inform the sync manager to find parents for this block @@ -964,10 +952,7 @@ impl Worker { ); self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); } - Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) - | Err( - e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection), - ) => { + Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!( self.log, "Failed to verify execution payload"; diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 87d4da2c6d..8ca9c35e47 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -135,6 +135,7 @@ impl Worker { executor.spawn( async move { let mut send_block_count = 0; + let mut send_response = true; for root in request.block_roots.iter() { match self .chain @@ -157,6 +158,23 @@ impl Worker { "request_root" => ?root ); } + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { + debug!( + self.log, + "Failed to fetch execution payload for blocks by root request"; + "block_root" => ?root, + "reason" => "execution layer not synced", + ); + // send the stream terminator + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Execution layer not synced".into(), + request_id, + ); + send_response = false; + break; + } Err(e) => { debug!( self.log, @@ -173,11 +191,13 @@ impl Worker { "Received BlocksByRoot Request"; "peer" => %peer_id, "requested" => request.block_roots.len(), - "returned" => send_block_count + "returned" => %send_block_count ); // send stream termination - self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + if send_response { + self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + } drop(send_on_drop); }, "load_blocks_by_root_blocks", @@ 
-255,6 +275,7 @@ impl Worker { executor.spawn( async move { let mut blocks_sent = 0; + let mut send_response = true; for root in block_roots { match self.chain.get_block(&root).await { @@ -280,6 +301,23 @@ impl Worker { ); break; } + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { + debug!( + self.log, + "Failed to fetch execution payload for blocks by range request"; + "block_root" => ?root, + "reason" => "execution layer not synced", + ); + // send the stream terminator + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Execution layer not synced".into(), + request_id, + ); + send_response = false; + break; + } Err(e) => { error!( self.log, @@ -320,12 +358,15 @@ impl Worker { ); } - // send the stream terminator - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - response: Response::BlocksByRange(None), - id: request_id, - }); + if send_response { + // send the stream terminator + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlocksByRange(None), + id: request_id, + }); + } + drop(send_on_drop); }, "load_blocks_by_range_blocks", diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 804cfbe463..3b2429ee9b 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -1,12 +1,13 @@ use std::time::Duration; use super::{super::work_reprocessing_queue::ReprocessQueueMessage, Worker}; +use crate::beacon_processor::work_reprocessing_queue::QueuedRpcBlock; use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE; use crate::beacon_processor::DuplicateCache; use crate::metrics; use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; -use beacon_chain::ExecutionPayloadError; +use beacon_chain::CountUnrealized; use 
beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; @@ -20,7 +21,7 @@ use types::{Epoch, Hash256, SignedBeaconBlock}; #[derive(Clone, Debug, PartialEq)] pub enum ChainSegmentProcessId { /// Processing Id of a range syncing batch. - RangeBatchId(ChainId, Epoch), + RangeBatchId(ChainId, Epoch, CountUnrealized), /// Processing ID for a backfill syncing batch. BackSyncBatchId(Epoch), /// Processing Id of the parent lookup of a block. @@ -53,21 +54,42 @@ impl Worker { process_type: BlockProcessType, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, + should_process: bool, ) { + if !should_process { + // Sync handles these results + self.send_sync_message(SyncMessage::BlockProcessed { + process_type, + result: crate::sync::manager::BlockProcessResult::Ignored, + }); + return; + } // Check if the block is already being imported through another source let handle = match duplicate_cache.check_and_insert(block.canonical_root()) { Some(handle) => handle, None => { - // Sync handles these results - self.send_sync_message(SyncMessage::BlockProcessed { + debug!( + self.log, + "Gossip block is being processed"; + "action" => "sending rpc block to reprocessing queue", + "block_root" => %block.canonical_root(), + ); + // Send message to work reprocess queue to retry the block + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + block: block.clone(), process_type, - result: Err(BlockError::BlockIsAlreadyKnown), + seen_timestamp, + should_process: true, }); + + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block.canonical_root()) + }; return; } }; let slot = block.slot(); - let result = self.chain.process_block(block).await; + let result = self.chain.process_block(block, CountUnrealized::True).await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -89,13 +111,13 @@ impl 
Worker { None, ); - self.recompute_head("process_rpc_block").await; + self.chain.recompute_head_at_current_slot().await; } } // Sync handles these results self.send_sync_message(SyncMessage::BlockProcessed { process_type, - result: result.map(|_| ()), + result: result.into(), }); // Drop the handle to remove the entry from the cache @@ -111,12 +133,15 @@ impl Worker { ) { let result = match sync_type { // this a request from the range sync - ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch, count_unrealized) => { let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); - match self.process_blocks(downloaded_blocks.iter()).await { + match self + .process_blocks(downloaded_blocks.iter(), count_unrealized) + .await + { (_, Ok(_)) => { debug!(self.log, "Batch processed"; "batch_epoch" => epoch, @@ -185,7 +210,10 @@ impl Worker { ); // parent blocks are ordered from highest slot to lowest, so we need to process in // reverse - match self.process_blocks(downloaded_blocks.iter().rev()).await { + match self + .process_blocks(downloaded_blocks.iter().rev(), CountUnrealized::True) + .await + { (imported_blocks, Err(e)) => { debug!(self.log, "Parent lookup failed"; "error" => %e.message); BatchProcessResult::Failed { @@ -209,13 +237,18 @@ impl Worker { async fn process_blocks<'a>( &self, downloaded_blocks: impl Iterator>>, + count_unrealized: CountUnrealized, ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec> = downloaded_blocks.cloned().collect(); - match self.chain.process_chain_segment(blocks).await { + match self + .chain + .process_chain_segment(blocks, count_unrealized) + .await + { ChainSegmentResult::Successful { imported_blocks } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); if imported_blocks > 0 { - 
self.recompute_head("process_blocks_ok").await; + self.chain.recompute_head_at_current_slot().await; } (imported_blocks, Ok(())) } @@ -226,7 +259,7 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL); let r = self.handle_failed_chain_segment(error); if imported_blocks > 0 { - self.recompute_head("process_blocks_err").await; + self.chain.recompute_head_at_current_slot().await; } (imported_blocks, r) } @@ -359,24 +392,6 @@ impl Worker { } } - /// Runs fork-choice on a given chain. This is used during block processing after one successful - /// block import. - async fn recompute_head(&self, location: &str) { - match self.chain.recompute_head_at_current_slot().await { - Ok(()) => debug!( - self.log, - "Fork choice success"; - "location" => location - ), - Err(e) => error!( - self.log, - "Fork choice failed"; - "error" => ?e, - "location" => location - ), - } - } - /// Helper function to handle a `BlockError` from `process_chain_segment` fn handle_failed_chain_segment( &self, @@ -452,24 +467,22 @@ impl Worker { mode: FailureMode::ConsensusLayer, }) } - BlockError::ExecutionPayloadError(e) => match &e { - ExecutionPayloadError::NoExecutionConnection { .. } - | ExecutionPayloadError::RequestFailed { .. } => { + ref err @ BlockError::ExecutionPayloadError(ref epe) => { + if !epe.penalize_peer() { // These errors indicate an issue with the EL and not the `ChainSegment`. // Pause the syncing while the EL recovers debug!(self.log, "Execution layer verification failed"; "outcome" => "pausing sync", - "err" => ?e + "err" => ?err ); Err(ChainSegmentFailed { - message: format!("Execution layer offline. Reason: {:?}", e), + message: format!("Execution layer offline. Reason: {:?}", err), // Do not penalize peers for internal errors. 
peer_action: None, mode: FailureMode::ExecutionLayer { pause_sync: true }, }) - } - err => { + } else { debug!(self.log, "Invalid execution payload"; "error" => ?err @@ -483,7 +496,7 @@ impl Worker { mode: FailureMode::ExecutionLayer { pause_sync: false }, }) } - }, + } other => { debug!( self.log, "Invalid block received"; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index c21183608a..9e3302af24 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -362,7 +362,7 @@ impl NetworkService { Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg), // process any sync committee service events - Some(msg) = self.sync_committee_service.next() => self.on_sync_commitee_service_message(msg), + Some(msg) = self.sync_committee_service.next() => self.on_sync_committee_service_message(msg), event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, @@ -774,7 +774,7 @@ impl NetworkService { } } - fn on_sync_commitee_service_message(&mut self, msg: SubnetServiceMessage) { + fn on_sync_committee_service_message(&mut self, msg: SubnetServiceMessage) { match msg { SubnetServiceMessage::Subscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 99df8e4a66..2aa4acdb5a 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -1,7 +1,7 @@ use std::collections::hash_map::Entry; use std::time::Duration; -use beacon_chain::{BeaconChainTypes, BlockError, ExecutionPayloadError}; +use beacon_chain::{BeaconChainTypes, BlockError}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; @@ -19,6 +19,7 @@ use self::{ single_block_lookup::SingleBlockRequest, }; +use super::manager::BlockProcessResult; use 
super::BatchProcessResult; use super::{ manager::{BlockProcessType, Id}, @@ -68,6 +69,8 @@ impl BlockLookups { /* Lookup requests */ + /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is + /// constructed. pub fn search_block( &mut self, hash: Hash256, @@ -104,6 +107,8 @@ impl BlockLookups { } } + /// If a block is attempted to be processed but we do not know its parent, this function is + /// called in order to find the block's parent. pub fn search_parent( &mut self, block: Arc>, @@ -200,6 +205,7 @@ impl BlockLookups { ); } + /// Process a response received from a parent lookup request. pub fn parent_lookup_response( &mut self, id: Id, @@ -247,7 +253,7 @@ impl BlockLookups { | VerifyError::ExtraBlocksReturned => { let e = e.into(); warn!(self.log, "Peer sent invalid response to parent request."; - "peer_id" => %peer_id, "reason" => e); + "peer_id" => %peer_id, "reason" => %e); // We do not tolerate these kinds of errors. We will accept a few but these are signs // of a faulty peer. @@ -257,7 +263,6 @@ impl BlockLookups { self.request_parent(parent_lookup, cx); } VerifyError::PreviousFailure { parent_root } => { - self.failed_chains.insert(parent_lookup.chain_hash()); debug!( self.log, "Parent chain ignored due to past failure"; @@ -335,6 +340,7 @@ impl BlockLookups { } } + /// An RPC error has occurred during a parent lookup. This function handles this case. 
pub fn parent_lookup_failed( &mut self, id: Id, @@ -361,7 +367,7 @@ impl BlockLookups { pub fn single_block_lookup_failed(&mut self, id: Id, cx: &mut SyncNetworkContext) { if let Some(mut request) = self.single_block_lookups.remove(&id) { - request.register_failure(); + request.register_failure_downloading(); trace!(self.log, "Single block lookup failed"; "block" => %request.hash); if let Ok((peer_id, block_request)) = request.request_block() { if let Ok(request_id) = cx.single_block_lookup_request(peer_id, block_request) { @@ -381,7 +387,7 @@ impl BlockLookups { pub fn single_block_processed( &mut self, id: Id, - result: Result<(), BlockError>, + result: BlockProcessResult, cx: &mut SyncNetworkContext, ) { let mut req = match self.single_block_lookups.remove(&id) { @@ -403,52 +409,57 @@ impl BlockLookups { Err(_) => return, }; - if let Err(e) = &result { - trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); - } else { - trace!(self.log, "Single block processing succeeded"; "block" => %root); - } - - if let Err(e) = result { - match e { - BlockError::BlockIsAlreadyKnown => { - // No error here - } - BlockError::BeaconChainError(e) => { - // Internal error - error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); - } - BlockError::ParentUnknown(block) => { - self.search_parent(block, peer_id, cx); - } - - e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_)) - | e @ BlockError::ExecutionPayloadError( - ExecutionPayloadError::NoExecutionConnection, - ) => { - // These errors indicate that the execution layer is offline - // and failed to validate the execution payload. Do not downscore peer. - debug!( - self.log, - "Single block lookup failed. 
Execution layer is offline"; - "root" => %root, - "error" => ?e - ); - } - other => { - warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "single_block_failure", - ); - - // Try it again if possible. - req.register_failure(); - if let Ok((peer_id, request)) = req.request_block() { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) { - // insert with the new id - self.single_block_lookups.insert(request_id, req); + match result { + BlockProcessResult::Ok => { + trace!(self.log, "Single block processing succeeded"; "block" => %root); + } + BlockProcessResult::Ignored => { + // Beacon processor signalled to ignore the block processing result. + // This implies that the cpu is overloaded. Drop the request. + warn!( + self.log, + "Single block processing was ignored, cpu might be overloaded"; + "action" => "dropping single block request" + ); + } + BlockProcessResult::Err(e) => { + trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); + match e { + BlockError::BlockIsAlreadyKnown => { + // No error here + } + BlockError::BeaconChainError(e) => { + // Internal error + error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + } + BlockError::ParentUnknown(block) => { + self.search_parent(block, peer_id, cx); + } + ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { + // These errors indicate that the execution layer is offline + // and failed to validate the execution payload. Do not downscore peer. + debug!( + self.log, + "Single block lookup failed. 
Execution layer is offline / unsynced / misconfigured"; + "root" => %root, + "error" => ?e + ); + } + other => { + warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); + cx.report_peer( + peer_id, + PeerAction::MidToleranceError, + "single_block_failure", + ); + // Try it again if possible. + req.register_failure_processing(); + if let Ok((peer_id, request)) = req.request_block() { + if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) + { + // insert with the new id + self.single_block_lookups.insert(request_id, req); + } } } } @@ -464,7 +475,7 @@ impl BlockLookups { pub fn parent_block_processed( &mut self, chain_hash: Hash256, - result: Result<(), BlockError>, + result: BlockProcessResult, cx: &mut SyncNetworkContext, ) { let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self @@ -487,20 +498,32 @@ impl BlockLookups { return crit!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; - if let Err(e) = &result { - trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e); - } else { - trace!(self.log, "Parent block processing succeeded"; &parent_lookup); + match &result { + BlockProcessResult::Ok => { + trace!(self.log, "Parent block processing succeeded"; &parent_lookup) + } + BlockProcessResult::Err(e) => { + trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e) + } + BlockProcessResult::Ignored => { + trace!( + self.log, + "Parent block processing job was ignored"; + "action" => "re-requesting block", + &parent_lookup + ); + } } match result { - Err(BlockError::ParentUnknown(block)) => { + BlockProcessResult::Err(BlockError::ParentUnknown(block)) => { // need to keep looking for parents // add the block back to the queue and continue the search parent_lookup.add_block(block); self.request_parent(parent_lookup, cx); } - Ok(_) | 
Err(BlockError::BlockIsAlreadyKnown { .. }) => { + BlockProcessResult::Ok + | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => { let chain_hash = parent_lookup.chain_hash(); let blocks = parent_lookup.chain_blocks(); let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); @@ -521,10 +544,9 @@ impl BlockLookups { } } } - Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) - | Err( - e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection), - ) => { + ref e @ BlockProcessResult::Err(BlockError::ExecutionPayloadError(ref epe)) + if !epe.penalize_peer() => + { // These errors indicate that the execution layer is offline // and failed to validate the execution payload. Do not downscore peer. debug!( @@ -534,7 +556,7 @@ impl BlockLookups { "error" => ?e ); } - Err(outcome) => { + BlockProcessResult::Err(outcome) => { // all else we consider the chain a failure and downvote the peer that sent // us the last block warn!( @@ -544,12 +566,22 @@ impl BlockLookups { "last_peer" => %peer_id, ); - // Add this chain to cache of failed chains - self.failed_chains.insert(chain_hash); - // This currently can be a host of errors. We permit this due to the partial // ambiguity. cx.report_peer(peer_id, PeerAction::MidToleranceError, "parent_request_err"); + + // Try again if possible + parent_lookup.processing_failed(); + self.request_parent(parent_lookup, cx); + } + BlockProcessResult::Ignored => { + // Beacon processor signalled to ignore the block processing result. + // This implies that the cpu is overloaded. Drop the request. + warn!( + self.log, + "Parent block processing was ignored, cpu might be overloaded"; + "action" => "dropping parent request" + ); } } @@ -649,14 +681,26 @@ impl BlockLookups { parent_lookup::RequestError::SendFailed(_) => { // Probably shutting down, nothing to do here. 
Drop the request } - parent_lookup::RequestError::ChainTooLong - | parent_lookup::RequestError::TooManyAttempts => { + parent_lookup::RequestError::ChainTooLong => { self.failed_chains.insert(parent_lookup.chain_hash()); // This indicates faulty peers. for &peer_id in parent_lookup.used_peers() { cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) } } + parent_lookup::RequestError::TooManyAttempts { cannot_process } => { + // We only consider the chain failed if we were unable to process it. + // We could have failed because one peer continually failed to send us + // bad blocks. We still allow other peers to send us this chain. Note + // that peers that do this, still get penalised. + if cannot_process { + self.failed_chains.insert(parent_lookup.chain_hash()); + } + // This indicates faulty peers. + for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) + } + } parent_lookup::RequestError::NoPeers => { // This happens if the peer disconnects while the block is being // processed. Drop the request without extra penalty diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index 62503353ad..bf5a1b259b 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -10,7 +10,7 @@ use crate::sync::{ use super::single_block_lookup::{self, SingleBlockRequest}; -/// How many attempts we try to find a parent of a block before we give up trying . +/// How many attempts we try to find a parent of a block before we give up trying. pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. 
A chain should not appear where it's depth @@ -41,7 +41,12 @@ pub enum VerifyError { pub enum RequestError { SendFailed(&'static str), ChainTooLong, - TooManyAttempts, + /// We witnessed too many failures trying to complete this parent lookup. + TooManyAttempts { + /// We received more failures trying to process the blocks than downloading them + /// from peers. + cannot_process: bool, + }, NoPeers, } @@ -105,7 +110,12 @@ impl ParentLookup { } pub fn download_failed(&mut self) { - self.current_parent_request.register_failure(); + self.current_parent_request.register_failure_downloading(); + self.current_parent_request_id = None; + } + + pub fn processing_failed(&mut self) { + self.current_parent_request.register_failure_processing(); self.current_parent_request_id = None; } @@ -126,7 +136,7 @@ impl ParentLookup { // be dropped and the peer downscored. if let Some(parent_root) = block.as_ref().map(|block| block.parent_root()) { if failed_chains.contains(&parent_root) { - self.current_parent_request.register_failure(); + self.current_parent_request.register_failure_downloading(); self.current_parent_request_id = None; return Err(VerifyError::PreviousFailure { parent_root }); } @@ -144,7 +154,7 @@ impl ParentLookup { #[cfg(test)] pub fn failed_attempts(&self) -> u8 { - self.current_parent_request.failed_attempts + self.current_parent_request.failed_attempts() } pub fn add_peer(&mut self, block_root: &Hash256, peer_id: &PeerId) -> bool { @@ -171,7 +181,9 @@ impl From for RequestError { fn from(e: super::single_block_lookup::LookupRequestError) -> Self { use super::single_block_lookup::LookupRequestError as E; match e { - E::TooManyAttempts => RequestError::TooManyAttempts, + E::TooManyAttempts { cannot_process } => { + RequestError::TooManyAttempts { cannot_process } + } E::NoPeers => RequestError::NoPeers, } } @@ -195,7 +207,10 @@ impl RequestError { match self { RequestError::SendFailed(e) => e, RequestError::ChainTooLong => "chain_too_long", - 
RequestError::TooManyAttempts => "too_many_attempts", + RequestError::TooManyAttempts { cannot_process } if *cannot_process => { + "too_many_processing_attempts" + } + RequestError::TooManyAttempts { cannot_process: _ } => "too_many_downloading_attempts", RequestError::NoPeers => "no_peers", } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index debf3de8db..8ba5b17bfa 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -18,8 +18,10 @@ pub struct SingleBlockRequest { pub available_peers: HashSet, /// Peers from which we have requested this block. pub used_peers: HashSet, - /// How many times have we attempted this block. - pub failed_attempts: u8, + /// How many times have we attempted to process this block. + failed_processing: u8, + /// How many times have we attempted to download this block. + failed_downloading: u8, } #[derive(Debug, PartialEq, Eq)] @@ -38,7 +40,11 @@ pub enum VerifyError { #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupRequestError { - TooManyAttempts, + /// Too many failed attempts + TooManyAttempts { + /// The failed attempts were primarily due to processing failures. + cannot_process: bool, + }, NoPeers, } @@ -49,15 +55,29 @@ impl SingleBlockRequest { state: State::AwaitingDownload, available_peers: HashSet::from([peer_id]), used_peers: HashSet::default(), - failed_attempts: 0, + failed_processing: 0, + failed_downloading: 0, } } - pub fn register_failure(&mut self) { - self.failed_attempts += 1; + /// Registers a failure in processing a block. + pub fn register_failure_processing(&mut self) { + self.failed_processing = self.failed_processing.saturating_add(1); self.state = State::AwaitingDownload; } + /// Registers a failure in downloading a block. This might be a peer disconnection or a wrong + /// block. 
+ pub fn register_failure_downloading(&mut self) { + self.failed_downloading = self.failed_downloading.saturating_add(1); + self.state = State::AwaitingDownload; + } + + /// The total number of failures, whether it be processing or downloading. + pub fn failed_attempts(&self) -> u8 { + self.failed_processing + self.failed_downloading + } + pub fn add_peer(&mut self, hash: &Hash256, peer_id: &PeerId) -> bool { let is_useful = &self.hash == hash; if is_useful { @@ -72,7 +92,7 @@ impl SingleBlockRequest { if let State::Downloading { peer_id } = &self.state { if peer_id == dc_peer_id { // Peer disconnected before providing a block - self.register_failure(); + self.register_failure_downloading(); return Err(()); } } @@ -87,14 +107,16 @@ impl SingleBlockRequest { ) -> Result>>, VerifyError> { match self.state { State::AwaitingDownload => { - self.register_failure(); + self.register_failure_downloading(); Err(VerifyError::ExtraBlocksReturned) } State::Downloading { peer_id } => match block { Some(block) => { if block.canonical_root() != self.hash { // return an error and drop the block - self.register_failure(); + // NOTE: we take this is as a download failure to prevent counting the + // attempt as a chain failure, but simply a peer failure. + self.register_failure_downloading(); Err(VerifyError::RootMismatch) } else { // Return the block for processing. @@ -103,14 +125,14 @@ impl SingleBlockRequest { } } None => { - self.register_failure(); + self.register_failure_downloading(); Err(VerifyError::NoBlockReturned) } }, State::Processing { peer_id: _ } => match block { Some(_) => { // We sent the block for processing and received an extra block. 
- self.register_failure(); + self.register_failure_downloading(); Err(VerifyError::ExtraBlocksReturned) } None => { @@ -124,19 +146,19 @@ impl SingleBlockRequest { pub fn request_block(&mut self) -> Result<(PeerId, BlocksByRootRequest), LookupRequestError> { debug_assert!(matches!(self.state, State::AwaitingDownload)); - if self.failed_attempts <= MAX_ATTEMPTS { - if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { - let request = BlocksByRootRequest { - block_roots: VariableList::from(vec![self.hash]), - }; - self.state = State::Downloading { peer_id }; - self.used_peers.insert(peer_id); - Ok((peer_id, request)) - } else { - Err(LookupRequestError::NoPeers) - } + if self.failed_attempts() >= MAX_ATTEMPTS { + Err(LookupRequestError::TooManyAttempts { + cannot_process: self.failed_processing >= self.failed_downloading, + }) + } else if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { + let request = BlocksByRootRequest { + block_roots: VariableList::from(vec![self.hash]), + }; + self.state = State::Downloading { peer_id }; + self.used_peers.insert(peer_id); + Ok((peer_id, request)) } else { - Err(LookupRequestError::TooManyAttempts) + Err(LookupRequestError::NoPeers) } } @@ -169,6 +191,8 @@ impl slog::Value for SingleBlockRequest { serializer.emit_arguments("processing_peer", &format_args!("{}", peer_id))? 
} } + serializer.emit_u8("failed_downloads", self.failed_downloading)?; + serializer.emit_u8("failed_processing", self.failed_processing)?; slog::Result::Ok(()) } } @@ -200,11 +224,28 @@ mod tests { } #[test] - fn test_max_attempts() { + fn test_block_lookup_failures() { + const FAILURES: u8 = 3; let peer_id = PeerId::random(); let block = rand_block(); - let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); - sl.register_failure(); + let mut sl = SingleBlockRequest::::new(block.canonical_root(), peer_id); + for _ in 1..FAILURES { + sl.request_block().unwrap(); + sl.register_failure_downloading(); + } + + // Now we receive the block and send it for processing + sl.request_block().unwrap(); + sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); + + // One processing failure maxes the available attempts + sl.register_failure_processing(); + assert_eq!( + sl.request_block(), + Err(LookupRequestError::TooManyAttempts { + cannot_process: false + }) + ) } } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index e9c8ac8ca7..b3afadda2c 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -168,7 +168,7 @@ fn test_single_block_lookup_happy_path() { // Send the stream termination. Peer should have not been penalized, and the request removed // after processing. bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); - bl.single_block_processed(id, Ok(()), &mut cx); + bl.single_block_processed(id, Ok(()).into(), &mut cx); rig.expect_empty_network(); assert_eq!(bl.single_block_lookups.len(), 0); } @@ -252,7 +252,11 @@ fn test_single_block_lookup_becomes_parent_request() { // Send the stream termination. Peer should have not been penalized, and the request moved to a // parent request after processing. 
- bl.single_block_processed(id, Err(BlockError::ParentUnknown(Arc::new(block))), &mut cx); + bl.single_block_processed( + id, + BlockError::ParentUnknown(Arc::new(block)).into(), + &mut cx, + ); assert_eq!(bl.single_block_lookups.len(), 0); rig.expect_parent_request(); rig.expect_empty_network(); @@ -278,7 +282,7 @@ fn test_parent_lookup_happy_path() { rig.expect_empty_network(); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Err(BlockError::BlockIsAlreadyKnown), &mut cx); + bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx); rig.expect_parent_chain_process(); bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); assert_eq!(bl.parent_queue.len(), 0); @@ -312,7 +316,7 @@ fn test_parent_lookup_wrong_response() { rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Ok(()), &mut cx); + bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); assert_eq!(bl.parent_queue.len(), 0); @@ -341,7 +345,7 @@ fn test_parent_lookup_empty_response() { rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Ok(()), &mut cx); + bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); assert_eq!(bl.parent_queue.len(), 0); @@ -369,7 +373,7 @@ fn test_parent_lookup_rpc_failure() { rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. 
- bl.parent_block_processed(chain_hash, Ok(()), &mut cx); + bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); assert_eq!(bl.parent_queue.len(), 0); @@ -381,12 +385,11 @@ fn test_parent_lookup_too_many_attempts() { let parent = rig.rand_block(); let block = rig.block_with_parent(parent.canonical_root()); - let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); // Trigger the request bl.search_parent(Arc::new(block), peer_id, &mut cx); - for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE + 1 { + for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { let id = rig.expect_parent_request(); match i % 2 { // make sure every error is accounted for @@ -398,6 +401,8 @@ fn test_parent_lookup_too_many_attempts() { // Send a bad block this time. It should be tried again. let bad_block = rig.rand_block(); bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + // Send the stream termination + bl.parent_lookup_response(id, peer_id, None, D, &mut cx); rig.expect_penalty(); } } @@ -407,7 +412,74 @@ fn test_parent_lookup_too_many_attempts() { } assert_eq!(bl.parent_queue.len(), 0); - assert!(bl.failed_chains.contains(&chain_hash)); +} + +#[test] +fn test_parent_lookup_too_many_download_attempts_no_blacklist() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let parent = rig.rand_block(); + let block = rig.block_with_parent(parent.canonical_root()); + let block_hash = block.canonical_root(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_parent(Arc::new(block), peer_id, &mut cx); + for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { + assert!(!bl.failed_chains.contains(&block_hash)); + let id = rig.expect_parent_request(); + if i % 2 != 0 { + // The request fails. It should be tried again. 
+ bl.parent_lookup_failed(id, peer_id, &mut cx); + } else { + // Send a bad block this time. It should be tried again. + let bad_block = rig.rand_block(); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + rig.expect_penalty(); + } + if i < parent_lookup::PARENT_FAIL_TOLERANCE { + assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i)); + } + } + + assert_eq!(bl.parent_queue.len(), 0); + assert!(!bl.failed_chains.contains(&block_hash)); + assert!(!bl.failed_chains.contains(&parent.canonical_root())); +} + +#[test] +fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { + const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let parent = Arc::new(rig.rand_block()); + let block = rig.block_with_parent(parent.canonical_root()); + let block_hash = block.canonical_root(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_parent(Arc::new(block), peer_id, &mut cx); + + // Fail downloading the block + for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { + let id = rig.expect_parent_request(); + // The request fails. It should be tried again. 
+ bl.parent_lookup_failed(id, peer_id, &mut cx); + } + + // Now fail processing a block in the parent request + for _ in 0..PROCESSING_FAILURES { + let id = dbg!(rig.expect_parent_request()); + assert!(!bl.failed_chains.contains(&block_hash)); + // send the right parent but fail processing + bl.parent_lookup_response(id, peer_id, Some(parent.clone()), D, &mut cx); + bl.parent_block_processed(block_hash, BlockError::InvalidSignature.into(), &mut cx); + bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + rig.expect_penalty(); + } + + assert!(bl.failed_chains.contains(&block_hash)); + assert_eq!(bl.parent_queue.len(), 0); } #[test] @@ -440,7 +512,7 @@ fn test_parent_lookup_too_deep() { // the processing result bl.parent_block_processed( chain_hash, - Err(BlockError::ParentUnknown(Arc::new(block))), + BlockError::ParentUnknown(Arc::new(block)).into(), &mut cx, ) } @@ -458,3 +530,56 @@ fn test_parent_lookup_disconnection() { bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_queue.is_empty()); } + +#[test] +fn test_single_block_lookup_ignored_response() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let block = rig.rand_block(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_block(block.canonical_root(), peer_id, &mut cx); + let id = rig.expect_block_request(); + + // The peer provides the correct block, should not be penalized. Now the block should be sent + // for processing. + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); + rig.expect_empty_network(); + rig.expect_block_process(); + + // The request should still be active. + assert_eq!(bl.single_block_lookups.len(), 1); + + // Send the stream termination. Peer should have not been penalized, and the request removed + // after processing. 
+ bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); + // Send an Ignored response, the request should be dropped + bl.single_block_processed(id, BlockProcessResult::Ignored, &mut cx); + rig.expect_empty_network(); + assert_eq!(bl.single_block_lookups.len(), 0); +} + +#[test] +fn test_parent_lookup_ignored_response() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let parent = rig.rand_block(); + let block = rig.block_with_parent(parent.canonical_root()); + let chain_hash = block.canonical_root(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_parent(Arc::new(block), peer_id, &mut cx); + let id = rig.expect_parent_request(); + + // Peer sends the right block, it should be sent for processing. Peer should not be penalized. + bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); + rig.expect_block_process(); + rig.expect_empty_network(); + + // Return an Ignored result. The request should be dropped + bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx); + rig.expect_empty_network(); + assert_eq!(bl.parent_queue.len(), 0); +} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 3e44256655..fe27a33c5c 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -117,7 +117,7 @@ pub enum SyncMessage { /// Block processed BlockProcessed { process_type: BlockProcessType, - result: Result<(), BlockError>, + result: BlockProcessResult, }, } @@ -128,6 +128,13 @@ pub enum BlockProcessType { ParentLookup { chain_hash: Hash256 }, } +#[derive(Debug)] +pub enum BlockProcessResult { + Ok, + Err(BlockError), + Ignored, +} + /// The result of processing multiple blocks (a chain segment). 
#[derive(Debug)] pub enum BatchProcessResult { @@ -525,7 +532,7 @@ impl SyncManager { .parent_block_processed(chain_hash, result, &mut self.network), }, SyncMessage::BatchProcessed { sync_type, result } => match sync_type { - ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => { self.range_sync.handle_block_process_result( &mut self.network, chain_id, @@ -620,3 +627,18 @@ impl SyncManager { } } } + +impl From>> for BlockProcessResult { + fn from(result: Result>) -> Self { + match result { + Ok(_) => BlockProcessResult::Ok, + Err(e) => e.into(), + } + } +} + +impl From> for BlockProcessResult { + fn from(e: BlockError) -> Self { + BlockProcessResult::Err(e) + } +} diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index ef5ba23e66..caa08165a9 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -2,7 +2,7 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::beacon_processor::{ChainSegmentProcessId, FailureMode}; use crate::sync::{manager::Id, network_context::SyncNetworkContext, BatchProcessResult}; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChainTypes, CountUnrealized}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use rand::seq::SliceRandom; @@ -100,6 +100,8 @@ pub struct SyncingChain { /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. beacon_processor_send: Sender>, + is_finalized_segment: bool, + /// The chain's log. 
log: slog::Logger, } @@ -126,6 +128,7 @@ impl SyncingChain { target_head_root: Hash256, peer_id: PeerId, beacon_processor_send: Sender>, + is_finalized_segment: bool, log: &slog::Logger, ) -> Self { let mut peers = FnvHashMap::default(); @@ -148,6 +151,7 @@ impl SyncingChain { current_processing_batch: None, validated_batches: 0, beacon_processor_send, + is_finalized_segment, log: log.new(o!("chain" => id)), } } @@ -302,7 +306,12 @@ impl SyncingChain { // for removing chains and checking completion is in the callback. let blocks = batch.start_processing()?; - let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id); + let count_unrealized = if self.is_finalized_segment { + CountUnrealized::False + } else { + CountUnrealized::True + }; + let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized); self.current_processing_batch = Some(batch_id); if let Err(e) = self diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 7ddfc3f70a..e76adff3af 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -472,10 +472,10 @@ impl ChainCollection { network: &mut SyncNetworkContext, ) { let id = SyncingChain::::id(&target_head_root, &target_head_slot); - let collection = if let RangeSyncType::Finalized = sync_type { - &mut self.finalized_chains + let (collection, is_finalized) = if let RangeSyncType::Finalized = sync_type { + (&mut self.finalized_chains, true) } else { - &mut self.head_chains + (&mut self.head_chains, false) }; match collection.entry(id) { Entry::Occupied(mut entry) => { @@ -501,6 +501,7 @@ impl ChainCollection { target_head_root, peer, beacon_processor_send.clone(), + is_finalized, &self.log, ); debug_assert_eq!(new_chain.get_id(), id); diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 964873a949..3515263878 100644 --- 
a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -148,7 +148,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { If a DNS address is provided, the enr-address is set to the IP address it resolves to and \ does not auto-update based on PONG responses in discovery. \ Set this only if you are sure other nodes can connect to your local node on this address. \ - Discovery will automatically find your external address,if possible.") + Discovery will automatically find your external address, if possible.") .requires("enr-udp-port") .takes_value(true), ) @@ -441,7 +441,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .alias("jwt-id") .help("Used by the beacon node to communicate a unique identifier to execution nodes \ during JWT authentication. It corresponds to the 'id' field in the JWT claims object.\ - Set to empty by deafult") + Set to empty by default") .takes_value(true) ) .arg( @@ -451,7 +451,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .alias("jwt-version") .help("Used by the beacon node to communicate a client version to execution nodes \ during JWT authentication. 
It corresponds to the 'clv' field in the JWT claims object.\ - Set to empty by deafult") + Set to empty by default") .takes_value(true) ) .arg( @@ -708,4 +708,53 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("250") .takes_value(true) ) + .arg( + Arg::with_name("builder-fallback-skips") + .long("builder-fallback-skips") + .help("If this node is proposing a block and has seen this number of skip slots \ + on the canonical chain in a row, it will NOT query any connected builders, \ + and will use the local execution engine for payload construction.") + .default_value("3") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-skips-per-epoch") + .long("builder-fallback-skips-per-epoch") + .help("If this node is proposing a block and has seen this number of skip slots \ + on the canonical chain in the past `SLOTS_PER_EPOCH`, it will NOT query \ + any connected builders, and will use the local execution engine for \ + payload construction.") + .default_value("8") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-epochs-since-finalization") + .long("builder-fallback-epochs-since-finalization") + .help("If this node is proposing a block and the chain has not finalized within \ + this number of epochs, it will NOT query any connected builders, \ + and will use the local execution engine for payload construction. Setting \ + this value to anything less than 2 will cause the node to NEVER query \ + connected builders. Setting it to 2 will cause this condition to be hit \ + if there are skips slots at the start of an epoch, right before this node \ + is set to propose.") + .default_value("3") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-disable-checks") + .long("builder-fallback-disable-checks") + .help("This flag disables all checks related to chain health. 
This means the builder \ + API will always be used for payload construction, regardless of recent chain \ + conditions.") + .takes_value(false) + ) + .arg( + Arg::with_name("count-unrealized") + .long("count-unrealized") + .hidden(true) + .help("Enables an alternative, potentially more performant FFG \ + vote tracking method.") + .takes_value(true) + .default_value("true") + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c91bd711e5..6daee50de0 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -630,6 +630,23 @@ pub fn get_config( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } + client_config.chain.count_unrealized = + clap_utils::parse_required(cli_args, "count-unrealized")?; + + /* + * Builder fallback configs. + */ + client_config.chain.builder_fallback_skips = + clap_utils::parse_required(cli_args, "builder-fallback-skips")?; + client_config.chain.builder_fallback_skips_per_epoch = + clap_utils::parse_required(cli_args, "builder-fallback-skips-per-epoch")?; + client_config + .chain + .builder_fallback_epochs_since_finalization = + clap_utils::parse_required(cli_args, "builder-fallback-epochs-since-finalization")?; + client_config.chain.builder_fallback_disable_checks = + cli_args.is_present("builder-fallback-disable-checks"); + Ok(client_config) } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index e66cee6fde..c4b4a64a05 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1317,7 +1317,7 @@ impl, Cold: ItemStore> HotColdDB } /// Load a frozen state's slot, given its root. - fn load_cold_state_slot(&self, state_root: &Hash256) -> Result, Error> { + pub fn load_cold_state_slot(&self, state_root: &Hash256) -> Result, Error> { Ok(self .cold_db .get(state_root)? 
@@ -1583,7 +1583,7 @@ fn no_state_root_iter() -> Option, Vec>; +type DBKeyMap = HashMap, HashSet>>; /// A thread-safe `HashMap` wrapper. pub struct MemoryStore { db: RwLock, + col_keys: RwLock, transaction_mutex: Mutex<()>, _phantom: PhantomData, } @@ -18,6 +21,7 @@ impl MemoryStore { pub fn open() -> Self { Self { db: RwLock::new(HashMap::new()), + col_keys: RwLock::new(HashMap::new()), transaction_mutex: Mutex::new(()), _phantom: PhantomData, } @@ -41,6 +45,11 @@ impl KeyValueStore for MemoryStore { fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db.write().insert(column_key, val.to_vec()); + self.col_keys + .write() + .entry(col.as_bytes().to_vec()) + .or_insert_with(HashSet::new) + .insert(key.to_vec()); Ok(()) } @@ -63,6 +72,10 @@ impl KeyValueStore for MemoryStore { fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db.write().remove(&column_key); + self.col_keys + .write() + .get_mut(&col.as_bytes().to_vec()) + .map(|set| set.remove(key)); Ok(()) } @@ -81,6 +94,26 @@ impl KeyValueStore for MemoryStore { Ok(()) } + // pub type ColumnIter<'a> = Box), Error>> + 'a>; + fn iter_column(&self, column: DBColumn) -> ColumnIter { + let col = column.as_str(); + if let Some(keys) = self + .col_keys + .read() + .get(col.as_bytes()) + .map(|set| set.iter().cloned().collect::>()) + { + Box::new(keys.into_iter().filter_map(move |key| { + let hash = Hash256::from_slice(&key); + self.get_bytes(col, &key) + .transpose() + .map(|res| res.map(|bytes| (hash, bytes))) + })) + } else { + Box::new(std::iter::empty()) + } + } + fn begin_rw_transaction(&self) -> MutexGuard<()> { self.transaction_mutex.lock() } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index e2a2eb37eb..d05677465b 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -3,6 +3,7 @@ * [Introduction](./intro.md) * [Become a 
Validator](./mainnet-validator.md) * [Become a Testnet Validator](./testnet-validator.md) +* [Merge Migration](./merge-migration.md) * [Installation](./installation.md) * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) @@ -17,21 +18,21 @@ * [Create a validator](./validator-create.md) * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) - * [Importing from the Staking Launchpad](./validator-import-launchpad.md) + * [Importing from the Staking Launchpad](./validator-import-launchpad.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) * [Validator Monitoring](./validator-monitoring.md) * [Doppelganger Protection](./validator-doppelganger.md) * [Suggested Fee Recipient](./suggested-fee-recipient.md) * [APIs](./api.md) - * [Beacon Node API](./api-bn.md) - * [/lighthouse](./api-lighthouse.md) - * [Validator Inclusion APIs](./validator-inclusion.md) - * [Validator Client API](./api-vc.md) - * [Endpoints](./api-vc-endpoints.md) - * [Authorization Header](./api-vc-auth-header.md) - * [Signature Header](./api-vc-sig-header.md) - * [Prometheus Metrics](./advanced_metrics.md) + * [Beacon Node API](./api-bn.md) + * [/lighthouse](./api-lighthouse.md) + * [Validator Inclusion APIs](./validator-inclusion.md) + * [Validator Client API](./api-vc.md) + * [Endpoints](./api-vc-endpoints.md) + * [Authorization Header](./api-vc-auth-header.md) + * [Signature Header](./api-vc-sig-header.md) + * [Prometheus Metrics](./advanced_metrics.md) * [Advanced Usage](./advanced.md) * [Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) @@ -44,6 +45,7 @@ * [Redundancy](./redundancy.md) * [Pre-Releases](./advanced-pre-releases.md) * [Release Candidates](./advanced-release-candidates.md) + * [MEV and Lighthouse](./builders.md) * [Contributing](./contributing.md) - * [Development Environment](./setup.md) + * [Development 
Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 178936cf61..397d9a28b5 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -23,11 +23,11 @@ states to slow down dramatically. A lower _slots per restore point_ value (SPRP) frequent restore points, while a higher SPRP corresponds to less frequent. The table below shows some example values. -| Use Case | SPRP | Yearly Disk Usage | Load Historical State | -| ---------------------- | -------------- | ----------------- | --------------------- | -| Block explorer/analysis | 32 | 1.4 TB | 155 ms | -| Hobbyist (prev. default) | 2048 | 23.1 GB | 10.2 s | -| Validator only (default) | 8192 | 5.7 GB | 41 s | +| Use Case | SPRP | Yearly Disk Usage | Load Historical State | +|--------------------------|------|-------------------|-----------------------| +| Block explorer/analysis | 32 | 1.4 TB | 155 ms | +| Hobbyist (prev. default) | 2048 | 23.1 GB | 10.2 s | +| Validator only (default) | 8192 | 5.7 GB | 41 s | As you can see, it's a high-stakes trade-off! The relationships to disk usage and historical state load time are both linear – doubling SPRP halves disk usage and doubles load time. The minimum SPRP diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index f5c4542b9e..d9c8080b4d 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -453,4 +453,23 @@ Caveats: loading a state on a boundary is most efficient. 
[block_reward_src]: -https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs \ No newline at end of file +https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs + + +### `/lighthouse/merge_readiness` + +```bash +curl -X GET "http://localhost:5052/lighthouse/merge_readiness" +``` + +``` +{ + "data":{ + "type":"ready", + "config":{ + "terminal_total_difficulty":"6400" + }, + "current_difficulty":"4800" + } + } +``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 69cd83db5c..9aedf6e249 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -24,12 +24,12 @@ Returns the software version and `git` commit hash for the Lighthouse binary. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/version` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/version` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Response Body @@ -47,12 +47,12 @@ Returns information regarding the health of the host machine. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/health` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/health` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | *Note: this endpoint is presently only available on Linux.* @@ -83,12 +83,12 @@ Returns the Ethereum proof-of-stake consensus specification loaded for this vali ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/spec` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/spec` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Response Body @@ -168,12 +168,12 @@ file may be read by a local user with access rights. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/auth` -Method | GET -Required Headers | - -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------| +| Path | `/lighthouse/auth` | +| Method | GET | +| Required Headers | - | +| Typical Responses | 200 | ### Example Path @@ -195,12 +195,12 @@ Lists all validators managed by this validator client. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Response Body @@ -232,12 +232,12 @@ Get a validator by their `voting_pubkey`. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/:voting_pubkey` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 400 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/:voting_pubkey` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | ### Example Path @@ -262,12 +262,12 @@ Update some values for the validator with `voting_pubkey`. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/:voting_pubkey` -Method | PATCH -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 400 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/:voting_pubkey` | +| Method | PATCH | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | ### Example Path @@ -301,12 +301,12 @@ Validators are generated from the mnemonic according to ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Request Body @@ -359,12 +359,12 @@ Import a keystore into the validator client. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/keystore` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/keystore` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Request Body @@ -433,12 +433,12 @@ generated with the path `m/12381/3600/i/42`. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/mnemonic` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/mnemonic` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Request Body @@ -479,12 +479,12 @@ Create any number of new validators, all of which will refer to a ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/web3signer` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 400 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/web3signer` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | ### Example Request Body diff --git a/book/src/builders.md b/book/src/builders.md new file mode 100644 index 0000000000..78a80899cc --- /dev/null +++ b/book/src/builders.md @@ -0,0 +1,144 @@ +# MEV and Lighthouse + +Lighthouse is able to interact with servers that implement the [builder +API](https://github.com/ethereum/builder-specs), allowing it to produce blocks without having +knowledge of the transactions included in the block. This enables Lighthouse to outsource the job of +transaction gathering/ordering within a block to parties specialized in this particular task. For +economic reasons, these parties will refuse to reveal the list of transactions to the validator +before the validator has committed to (i.e. signed) the block. A primer on MEV can be found +[here](https://ethereum.org/en/developers/docs/mev/).
+ +Using the builder API is not known to introduce additional slashing risks, however a live-ness risk +(i.e. the ability for the chain to produce valid blocks) is introduced because your node will be +signing blocks without executing the transactions within the block. Therefore it won't know whether +the transactions are valid and it may sign a block that the network will reject. This would lead to +a missed proposal and the opportunity cost of lost block rewards. + +## How to connect to a builder + +The beacon node and validator client each require a new flag for lighthouse to be fully compatible with builder API servers. + +``` +lighthouse bn --builder https://mainnet-builder.test +``` +The `--builder` flag will cause the beacon node to query the provided URL during block production for a block +payload with stubbed-out transactions. If this request fails, Lighthouse will fall back to the local +execution engine and produce a block using transactions gathered and verified locally. + +The beacon node will *only* query for this type of block (a "blinded" block) when a validator specifically requests it. +Otherwise, it will continue to serve full blocks as normal. In order to configure the validator client to query for +blinded blocks, you should use the following flag: + +``` +lighthouse vc --builder-proposals +``` +With the `--builder-proposals` flag, the validator client will ask for blinded blocks for all validators it manages. +In order to configure whether a validator queries for blinded blocks check out [this section.](#validator-client-configuration) + +## Multiple builders + +Lighthouse currently only supports a connection to a single builder. If you'd like to connect to multiple builders or +relays, run one of the following services and configure lighthouse to use it with the `--builder` flag. 
+ +* [`mev-boost`][mev-boost] +* [`mev-rs`][mev-rs] + +## Validator Client Configuration + +In the validator client you can configure gas limit, fee recipient and whether to use the builder API on a +per-validator basis or set a configuration for all validators managed by the validator client. CLI flags for each of these +will serve as default values for all validators managed by the validator client. In order to manage the values +per-validator you can either make updates to the `validator_definitions.yml` file or you can use the HTTP requests +described below. + +Both the gas limit and fee recipient will be passed along as suggestions to connected builders. If there is a discrepancy +in either, it will *not* keep you from proposing a block with the builder. This is because the bounds on gas limit are calculated based +on prior execution blocks, so it should be managed by an execution engine, even if it is external. Depending on the +connected relay, payment to the proposer might be in the form of a transaction within the block to the fee recipient, +so a discrepancy in fee recipient might not indicate that there is something afoot. If you know the relay you are connected to *should* +only create blocks with a `fee_recipient` field matching the one suggested, you can use +the [strict fee recipient](suggested-fee-recipient.md#strict-fee-recipient) flag. + +### Enable/Disable builder proposals and set Gas Limit +Use the [lighthouse API](api-vc-endpoints.md) to configure these fields per-validator. 
+ +#### `PATCH /lighthouse/validators/:voting_pubkey` + + +#### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/:voting_pubkey` | +| Method | PATCH | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | + +#### Example Path + +``` +localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde +``` + +#### Example Request Body +Each field is optional. +```json +{ + "builder_proposals": true, + "gas_limit": 3000000001 +} +``` + +#### Example Response Body + +```json +null +``` +### Fee Recipient + +Refer to [suggested fee recipient](suggested-fee-recipient.md) documentation. + +### Validator definitions example +``` +--- +- enabled: true + voting_public_key: "0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007" + type: local_keystore + voting_keystore_path: /home/paul/.lighthouse/validators/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007/voting-keystore.json + voting_keystore_password_path: /home/paul/.lighthouse/secrets/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007 + suggested_fee_recipient: "0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21" + gas_limit: 3000000001 + builder_proposals: true +- enabled: false + voting_public_key: "0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477" + type: local_keystore voting_keystore_path: /home/paul/.lighthouse/validators/0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477/voting-keystore.json + voting_keystore_password: myStrongpa55word123&$ + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: 333333333 + builder_proposals: true +``` + +## 
Circuit breaker conditions + +By outsourcing payload construction and signing blocks without verifying transactions, we are creating a new risk to +live-ness. If most of the network is using a small set of relays and one is bugged, a string of missed proposals could +happen quickly. This is not only generally bad for the network, but if you have a proposal coming up, you might not +realize that your next proposal is likely to be missed until it's too late. So we've implemented some "chain health" +checks to try and avoid scenarios like this. + +By default, Lighthouse is strict with these conditions, but we encourage users to learn about and adjust them. + +- `--builder-fallback-skips` - If we've seen this number of skip slots on the canonical chain in a row prior to proposing, we will NOT query + any connected builders, and will use the local execution engine for payload construction. +- `--builder-fallback-skips-per-epoch` - If we've seen this number of skip slots on the canonical chain in the past `SLOTS_PER_EPOCH`, we will NOT + query any connected builders, and will use the local execution engine for payload construction. +- `--builder-fallback-epochs-since-finalization` - If we're proposing and the chain has not finalized within + this number of epochs, we will NOT query any connected builders, and will use the local execution engine for payload + construction. Setting this value to anything less than 2 will cause the node to NEVER query connected builders. Setting + it to 2 will cause this condition to be hit if there are skip slots at the start of an epoch, right before this node + is set to propose. +- `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder + API will always be used for payload construction, regardless of recent chain conditions.
+ +[mev-rs]: https://github.com/ralexstokes/mev-rs +[mev-boost]: https://github.com/flashbots/mev-boost diff --git a/book/src/contributing.md b/book/src/contributing.md index 9204ff8463..4b21d1ecf2 100644 --- a/book/src/contributing.md +++ b/book/src/contributing.md @@ -33,7 +33,7 @@ Lighthouse maintains two permanent branches: - [`stable`][stable]: Always points to the latest stable release. - This is ideal for most users. - [`unstable`][unstable]: Used for development, contains the latest PRs. - - Developers should base thier PRs on this branch. + - Developers should base their PRs on this branch. ## Ethereum consensus client diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index ce7ff21328..6bbe1345d3 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -12,15 +12,29 @@ command for applying database downgrades. **Everything on this page applies to the Lighthouse _beacon node_, not to the validator client or the slasher**. +## List of schema versions + +| Lighthouse version | Release date | Schema version | Downgrade available? | +|--------------------|--------------|----------------|----------------------| +| v2.0.0 | Oct 2021 | v5 | no | +| v2.1.0 | Jan 2022 | v8 | no | +| v2.2.0 | Apr 2022 | v8 | no | +| v2.3.0 | May 2022 | v9 | yes (pre Bellatrix) | +| v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) | +| v2.5.0 | Aug 2022 | v11 | yes | + +> **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release +> (e.g. v2.3.0). + ## How to apply a database downgrade To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters. 1. Make sure you have a copy of the latest version of Lighthouse. This will be the version that knows about the latest schema change, and has the ability to revert it. -2. Work out the schema version you would like to downgrade to by checking the Lighthouse release - notes. E.g. 
if you want to downgrade from v2.3.0, which upgraded the version from v8 to v9, then - you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier. +2. Work out the schema version you would like to downgrade to by checking the table above, or the + Lighthouse release notes. E.g. if you want to downgrade from v2.3.0, which upgraded the version + from v8 to v9, then you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier. 3. **Ensure that downgrading is feasible**. Not all schema upgrades can be reverted, and some of them are time-sensitive. The release notes will state whether a downgrade is available and whether any caveats apply to it. diff --git a/book/src/docker.md b/book/src/docker.md index 9a0378f091..f22b8a2008 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -73,7 +73,7 @@ The `stability` is: The `arch` is: * `-amd64` for x86_64, e.g. Intel, AMD -* `-arm64` for aarch64, e.g. Rasperry Pi 4 +* `-arm64` for aarch64, e.g. Raspberry Pi 4 * empty for a multi-arch image (works on either `amd64` or `arm64` platforms) The `modernity` is: diff --git a/book/src/faq.md b/book/src/faq.md index e14947fb05..6692d61495 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -6,7 +6,7 @@ - [What should I do if I lose my slashing protection database?](#what-should-i-do-if-i-lose-my-slashing-protection-database) - [How do I update lighthouse?](#how-do-i-update-lighthouse) - [I can't compile lighthouse](#i-cant-compile-lighthouse) -- [What is "Syncing eth1 block cache"](#what-is-syncing-eth1-block-cache) +- [What is "Syncing deposit contract block cache"](#what-is-syncing-deposit-contract-block-cache) - [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup) - [How can I monitor my validators](#how-can-i-monitor-my-validators) @@ -154,10 +154,10 @@ You will just also need to make sure the code you have checked out is up to date See [here.](./installation-source.md#troubleshooting) -### What is "Syncing eth1 block cache" 
+### What is "Syncing deposit contract block cache" ``` -Nov 30 21:04:28.268 WARN Syncing eth1 block cache est_blocks_remaining: initializing deposits, service: slot_notifier +Nov 30 21:04:28.268 WARN Syncing deposit contract block cache est_blocks_remaining: initializing deposits, service: slot_notifier ``` This log indicates that your beacon node is downloading blocks and deposits diff --git a/book/src/installation-priorities.md b/book/src/installation-priorities.md index 69d871c396..0008e327b7 100644 --- a/book/src/installation-priorities.md +++ b/book/src/installation-priorities.md @@ -4,10 +4,10 @@ When publishing releases, Lighthouse will include an "Update Priority" section i The "Update Priority" section will include a table which may appear like so: -|User Class |Beacon Node | Validator Client| ---- | --- | --- -|Staking Users| Medium Priority | Low Priority | -|Non-Staking Users| Low Priority|---| +| User Class | Beacon Node | Validator Client | +|-------------------|-----------------|------------------| +| Staking Users | Medium Priority | Low Priority | +| Non-Staking Users | Low Priority | --- | To understand this table, the following terms are important: diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 4b977f5222..fc1ac4c092 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -19,6 +19,10 @@ Install the following packages: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` +> Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories +> of Ubuntu 18.04 or earlier. On these distributions CMake can still be installed via PPA: +> [https://apt.kitware.com/](https://apt.kitware.com) + #### macOS 1. Install the [Homebrew][] package manager. 
diff --git a/book/src/installation.md b/book/src/installation.md index 38fbe6b780..e222c401a2 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -14,7 +14,7 @@ The community maintains additional installation methods (currently only one). Additionally, there are two extra guides for specific uses: -- [Rapsberry Pi 4 guide](./pi.md). +- [Raspberry Pi 4 guide](./pi.md). - [Cross-compiling guide for developers](./cross-compiling.md). ## Minimum System Requirements diff --git a/book/src/mainnet-validator.md b/book/src/mainnet-validator.md index 0f91b8e272..41735f85bb 100644 --- a/book/src/mainnet-validator.md +++ b/book/src/mainnet-validator.md @@ -34,7 +34,7 @@ Remember, if you get stuck you can always reach out on our [Discord][discord]. > > **Please note**: the Lighthouse team does not take any responsibility for losses or damages -> occured through the use of Lighthouse. We have an experienced internal security team and have +> occurred through the use of Lighthouse. We have an experienced internal security team and have > undergone multiple third-party security-reviews, however the possibility of bugs or malicious > interference remains a real and constant threat. Validators should be prepared to lose some rewards > due to the actions of other actors on the consensus layer or software bugs. See the diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md new file mode 100644 index 0000000000..6ed6a9977a --- /dev/null +++ b/book/src/merge-migration.md @@ -0,0 +1,101 @@ +# Merge Migration + +This document provides detail for users who have been running a Lighthouse node *before* the merge +and are now preparing their node for the merge transition. + +## "Pre-Merge" and "Post-Merge" + +As of [v2.4.0](https://github.com/sigp/lighthouse/releases/tag/v2.4.0) Lighthouse can be considered +to have two modes: + +- "Pre-merge": `--execution-endpoint` flag *is not* provided. +- "Post-merge": `--execution-endpoint` flag *is* provided. 
+ +A "pre-merge" node, by definition, will fail to transition through the merge. Such a node *must* be +upgraded before the Bellatrix upgrade. + +## Migration + +Let us look at an example of the command line arguments for a pre-merge production staking BN: + +```bash +lighthouse \ + --network mainnet \ + beacon_node \ + --http \ + --eth1-endpoints http://localhost:8545,https://TOKEN@eth2-beacon-mainnet.infura.io +``` + +Converting the above to a post-merge configuration would render: + +```bash +lighthouse \ + --network mainnet \ + beacon_node \ + --http \ + --execution-endpoint http://localhost:8551 \ + --execution-jwt ~/.ethereum/geth/jwtsecret +``` + +The changes here are: + +1. Remove `--eth1-endpoints` + - The endpoint at `localhost` can be retained, it is our local execution engine. Once it is + upgraded to a merge-compatible release it will be used in the post-merge environment. + - The `infura.io` endpoint will be abandoned, Infura and most other third-party node providers + *are not* compatible with post-merge BNs. +2. Add the `--execution-endpoint` flag. + - We have reused the node at `localhost`, however we've switched to the authenticated engine API + port `8551`. All execution engines will have a specific port for this API, however it might + not be `8551`, see their documentation for details. +3. Add the `--execution-jwt` flag. + - This is the path to a file containing a 32-byte secret for authenticating the BN with the + execution engine. In this example our execution engine is Geth, so we've chosen the default + location for Geth. Your execution engine might have a different path. It is critical that both + the BN and execution engine reference a file with the same value, otherwise they'll fail to + communicate. + +Note that the `--network` and `--http` flags haven't changed. The only changes required for the +merge are ensuring that `--execution-endpoint` and `--execution-jwt` flags are provided!
In fact, +you can even leave the `--eth1-endpoints` flag there, it will be ignored. This is not recommended as +a deprecation warning will be logged and Lighthouse *may* remove these flags in the future. + +There are no changes required for the validator client, apart from ensuring it has been updated to the +same version as the beacon node. Check the version with `lighthouse --version`. + +## The relationship between `--eth1-endpoints` and `--execution-endpoint` + +Pre-merge users will be familiar with the `--eth1-endpoints` flag. This provides a list of Ethereum +"eth1" nodes (e.g., Geth, Nethermind, etc). Each beacon node (BN) can have multiple eth1 endpoints +and each eth1 endpoint can have many BNs connected (many-to-many relationship). The eth1 node +provides a source of truth for the [deposit +contract](https://ethereum.org/en/staking/deposit-contract/) and beacon chain proposers include this +information in beacon blocks in order to on-board new validators. BNs exclusively use the `eth` +namespace on the eth1 [JSON-RPC API](https://ethereum.org/en/developers/docs/apis/json-rpc/) to +achieve this. + +To progress through the Bellatrix upgrade nodes will need a *new* connection to an "eth1" node; +`--execution-endpoint`. This connection has a few different properties. Firstly, the term "eth1 +node" has been deprecated and replaced with "execution engine". Whilst "eth1 node" and "execution +engine" still refer to the same projects (Geth, Nethermind, etc) the former refers to the pre-merge +versions and the latter refers to post-merge versions. Secondly, there is a strict one-to-one +relationship between Lighthouse and the execution engine; only one Lighthouse node can connect to +one execution engine. Thirdly, it is impossible to fully verify the post-merge chain without an +execution engine. It *was* possible to verify the pre-merge chain without an eth1 node, it was just +impossible to reliably *propose* blocks without it.
+ +Since an execution engine is a hard requirement in the post-merge chain and the execution engine +contains the transaction history of the Ethereum chain, there is no longer a need for the +`--eth1-endpoints` flag for information about the deposit contract. The `--execution-endpoint` can +be used for all such queries. Therefore we can say that where `--execution-endpoint` is included +`--eth1-endpoints` should be omitted. + +## What about multiple execution endpoints? + +Since an execution engine can only have one connected BN, the value of having multiple execution +engines connected to the same BN is very low. An execution engine cannot be shared between BNs to +reduce costs. + +Whilst having multiple execution engines connected to a single BN might be useful for advanced +testing scenarios, Lighthouse (and other consensus clients) have decided to support *only one* +execution endpoint. Such scenarios could be resolved with a custom-made HTTP proxy. diff --git a/book/src/setup.md b/book/src/setup.md index dfff9290e6..e8c56623be 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -19,7 +19,7 @@ The additional requirements for developers are: ## Using `make` -Commands to run the test suite are avaiable via the `Makefile` in the +Commands to run the test suite are available via the `Makefile` in the project root for the benefit of CI/CD. We list some of these commands below so you can run them locally and avoid CI failures: diff --git a/book/src/slasher.md b/book/src/slasher.md index 05107238c3..889f9c6cbc 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -1,6 +1,6 @@ # Running a Slasher -Lighthouse includes a slasher for identifying slashable offences comitted by other validators and +Lighthouse includes a slasher for identifying slashable offences committed by other validators and including proof of those offences in blocks. 
Running a slasher is a good way to contribute to the health of the network, and doing so can earn @@ -69,7 +69,7 @@ The slasher uses MDBX as its backing store, which places a hard limit on the siz file. You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after initialization if the limit is reached. -By default the limit is set to accomodate the default history length and around 300K validators but +By default the limit is set to accommodate the default history length and around 300K validators but you can set it lower if running with a reduced history length. The space required scales approximately linearly in validator count and history length, i.e. if you halve either you can halve the space required. @@ -134,7 +134,7 @@ the slot duration. ### Chunk Size and Validator Chunk Size * Flags: `--slasher-chunk-size EPOCHS`, `--slasher-validator-chunk-size NUM_VALIDATORS` -* Arguments: number of ecochs, number of validators +* Arguments: number of epochs, number of validators * Defaults: 16, 256 Adjusting these parameter should only be done in conjunction with reading in detail diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md index 9ae6c102e3..a60c8e36dc 100644 --- a/book/src/slashing-protection.md +++ b/book/src/slashing-protection.md @@ -54,7 +54,7 @@ Examples where it is **ineffective** are: clients (e.g. Lighthouse and Prysm) running on the same machine, two Lighthouse instances using different datadirs, or two clients on completely different machines (e.g. one on a cloud server and one running locally). You are responsible for ensuring that your validator keys are never - running simultanously – the slashing protection DB **cannot protect you in this case**. + running simultaneously – the slashing protection DB **cannot protect you in this case**. * Importing keys from another client without also importing voting history. 
* If you use `--init-slashing-protection` to recreate a missing slashing protection database. diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index 3ff71ec7d6..a584be306f 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -10,7 +10,8 @@ coinbase and the recipient of other fees or rewards. There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees, it may use any address it chooses. It is assumed that an honest execution node *will* use the -`suggested_fee_recipient`, but users should note this trust assumption. +`suggested_fee_recipient`, but users should note this trust assumption. Check out the +[strict fee recipient](#strict-fee-recipient) section for how to mitigate this assumption. The `suggested_fee_recipient` can be provided to the VC, who will transmit it to the BN. The BN also has a choice regarding the fee recipient it passes to the execution node, creating another @@ -26,13 +27,17 @@ Lighthouse BN also provides a method for defining this value, should the VC not Assuming trustworthy nodes, the priority for the four methods is: 1. `validator_definitions.yml` -1. `--suggested-fee-recipient-file` 1. `--suggested-fee-recipient` provided to the VC. 1. `--suggested-fee-recipient` provided to the BN. -Users may configure the fee recipient via `validator_definitions.yml` or via the -`--suggested-fee-recipient-file` flag. The value in `validator_definitions.yml` will always take -precedence. +## Strict Fee Recipient + +If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose +`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal +block proposal flow and block proposals through the builder API. 
Proposals through the builder API are more likely +to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before +using this flag. If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the +local execution engine for payload construction, where a strict fee recipient check will still be applied. ### 1. Setting the fee recipient in the `validator_definitions.yml` @@ -56,36 +61,111 @@ Below is an example of the validator_definitions.yml with `suggested_fee_recipie suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" ``` -### 2. Using the "--suggested-fee-recipient-file" flag on the validator client - -Users can specify a file with the `--suggested-fee-recipient-file` flag. This option is useful for dynamically -changing fee recipients. This file is reloaded each time a validator is chosen to propose a block. - -Usage: -`lighthouse vc --suggested-fee-recipient-file fee_recipient.txt` - -The file should contain key value pairs corresponding to validator public keys and their associated -fee recipient. The file can optionally contain a `default` key for the default case. - -The following example sets the default and the values for the validators with pubkeys `0x87a5` and -`0xa556`: - -``` -default: 0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21 -0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007: 0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21 -0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477: 0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d -``` - -Lighthouse will first search for the fee recipient corresponding to the public key of the proposing -validator, if there are no matches for the public key, then it uses the address corresponding to the -default key (if present). - -### 3. Using the "--suggested-fee-recipient" flag on the validator client +### 2. 
Using the "--suggested-fee-recipient" flag on the validator client The `--suggested-fee-recipient` can be provided to the VC to act as a default value for all validators where a `suggested_fee_recipient` is not loaded from another method. -### 4. Using the "--suggested-fee-recipient" flag on the beacon node +### 3. Using the "--suggested-fee-recipient" flag on the beacon node The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the validator client does not transmit a `suggested_fee_recipient` to the BN. + +## Setting the fee recipient dynamically using the keymanager API + +When the [validator client API](api-vc.md) is enabled, the +[standard keymanager API](https://ethereum.github.io/keymanager-APIs/) includes an endpoint +for setting the fee recipient dynamically for a given public key. When used, the fee recipient +will be saved in `validator_definitions.yml` so that it persists across restarts of the validator +client. + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/eth/v1/validator/{pubkey}/feerecipient` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 202, 404 | + +#### Example Request Body +```json +{ + "ethaddress": "0x1D4E51167DBDC4789a014357f4029ff76381b16c" +} +``` + +```bash +DATADIR=$HOME/.lighthouse/mainnet +PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 +FEE_RECIPIENT=0x1D4E51167DBDC4789a014357f4029ff76381b16c + +curl -X POST \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + -d "{ \"ethaddress\": \"${FEE_RECIPIENT}\" }" \ + http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq +``` + +#### Successful Response (202) +```json +null +``` + +### Querying the fee recipient + +The same path with a `GET` request can be used to query the fee recipient for a 
given public key at any time. + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/eth/v1/validator/{pubkey}/feerecipient` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 404 | + +```bash +DATADIR=$HOME/.lighthouse/mainnet +PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 + +curl -X GET \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq +``` + +#### Successful Response (200) +```json +{ + "data": { + "pubkey": "0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591", + "ethaddress": "0x1d4e51167dbdc4789a014357f4029ff76381b16c" + } +} +``` + +### Removing the fee recipient + +The same path with a `DELETE` request can be used to remove the fee recipient for a given public key at any time. +This is useful if you want the fee recipient to fall back to the validator client (or beacon node) default. 
+ +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/eth/v1/validator/{pubkey}/feerecipient` | +| Method | DELETE | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 204, 404 | + +```bash +DATADIR=$HOME/.lighthouse/mainnet +PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 + +curl -X DELETE \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq +``` + +#### Successful Response (204) +```json +null +``` + + diff --git a/book/src/validator-import-launchpad.md b/book/src/validator-import-launchpad.md index aee9ac7b96..9849b91b70 100644 --- a/book/src/validator-import-launchpad.md +++ b/book/src/validator-import-launchpad.md @@ -1,6 +1,6 @@ # Importing from the Ethereum Staking Launch pad -The [Staking Lauchpad](https://github.com/ethereum/eth2.0-deposit) is a website +The [Staking Launchpad](https://github.com/ethereum/eth2.0-deposit) is a website from the Ethereum Foundation which guides users how to use the [`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) command-line program to generate consensus validator keys. 
diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index a8d6e03680..8523237c69 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.3.2-rc.0" +version = "2.5.0" authors = ["Sigma Prime "] edition = "2021" diff --git a/bors.toml b/bors.toml index d7d1e98762..0ff5d6231b 100644 --- a/bors.toml +++ b/bors.toml @@ -7,6 +7,7 @@ status = [ "ef-tests-ubuntu", "dockerfile-ubuntu", "eth1-simulator-ubuntu", + "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", "check-benchmarks", "check-consensus", diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 3f4831ae17..66e3b73547 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -45,6 +45,29 @@ pub enum Error { UnableToCreateValidatorDir(PathBuf), } +#[derive(Clone, PartialEq, Serialize, Deserialize, Hash, Eq)] +pub struct Web3SignerDefinition { + pub url: String, + /// Path to a .pem file. + #[serde(skip_serializing_if = "Option::is_none")] + pub root_certificate_path: Option, + /// Specifies a request timeout. + /// + /// The timeout is applied from when the request starts connecting until the response body has finished. + #[serde(skip_serializing_if = "Option::is_none")] + pub request_timeout_ms: Option, + + /// Path to a PKCS12 file. + #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_path: Option, + + /// Password for the PKCS12 file. + /// + /// An empty password will be used if this is omitted. + #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_password: Option, +} + /// Defines how the validator client should attempt to sign messages for this validator. 
#[derive(Clone, PartialEq, Serialize, Deserialize)] #[serde(tag = "type")] @@ -62,27 +85,7 @@ pub enum SigningDefinition { /// /// https://github.com/ConsenSys/web3signer #[serde(rename = "web3signer")] - Web3Signer { - url: String, - /// Path to a .pem file. - #[serde(skip_serializing_if = "Option::is_none")] - root_certificate_path: Option, - /// Specifies a request timeout. - /// - /// The timeout is applied from when the request starts connecting until the response body has finished. - #[serde(skip_serializing_if = "Option::is_none")] - request_timeout_ms: Option, - - /// Path to a PKCS12 file. - #[serde(skip_serializing_if = "Option::is_none")] - client_identity_path: Option, - - /// Password for the PKCS12 file. - /// - /// An empty password will be used if this is omitted. - #[serde(skip_serializing_if = "Option::is_none")] - client_identity_password: Option, - }, + Web3Signer(Web3SignerDefinition), } impl SigningDefinition { @@ -106,6 +109,12 @@ pub struct ValidatorDefinition { #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, + #[serde(default)] pub description: String, #[serde(flatten)] pub signing_definition: SigningDefinition, @@ -123,6 +132,8 @@ impl ValidatorDefinition { voting_keystore_password: Option, graffiti: Option, suggested_fee_recipient: Option
, + gas_limit: Option, + builder_proposals: Option, ) -> Result { let voting_keystore_path = voting_keystore_path.as_ref().into(); let keystore = @@ -135,6 +146,8 @@ impl ValidatorDefinition { description: keystore.description().unwrap_or("").to_string(), graffiti, suggested_fee_recipient, + gas_limit, + builder_proposals, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, @@ -281,6 +294,8 @@ impl ValidatorDefinitions { description: keystore.description().unwrap_or("").to_string(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path, @@ -523,4 +538,84 @@ mod tests { Some(Address::from_str("0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d").unwrap()) ); } + + #[test] + fn gas_limit_checks() { + let no_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + let def: ValidatorDefinition = serde_yaml::from_str(no_gas_limit).unwrap(); + assert!(def.gas_limit.is_none()); + + let invalid_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: "banana" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: Result = serde_yaml::from_str(invalid_gas_limit); + assert!(def.is_err()); + + let valid_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: 35000000 + voting_keystore_path: "" + voting_public_key: 
"0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: ValidatorDefinition = serde_yaml::from_str(valid_gas_limit).unwrap(); + assert_eq!(def.gas_limit, Some(35000000)); + } + + #[test] + fn builder_proposals_checks() { + let no_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + let def: ValidatorDefinition = serde_yaml::from_str(no_builder_proposals).unwrap(); + assert!(def.builder_proposals.is_none()); + + let invalid_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + builder_proposals: "banana" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: Result = serde_yaml::from_str(invalid_builder_proposals); + assert!(def.is_err()); + + let valid_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + builder_proposals: true + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: ValidatorDefinition = serde_yaml::from_str(valid_builder_proposals).unwrap(); + assert_eq!(def.builder_proposals, Some(true)); + } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index d374101308..8cd138e980 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -110,6 +110,7 @@ pub struct Timeouts { pub liveness: Duration, pub proposal: Duration, pub proposer_duties: Duration, + pub sync_committee_contribution: 
Duration, pub sync_duties: Duration, } @@ -121,6 +122,7 @@ impl Timeouts { liveness: timeout, proposal: timeout, proposer_duties: timeout, + sync_committee_contribution: timeout, sync_duties: timeout, } } @@ -330,7 +332,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_root( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -349,7 +351,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_fork( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -368,7 +370,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_finality_checkpoints( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -388,7 +390,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ids: Option<&[ValidatorId]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -418,7 +420,7 @@ impl BeaconNodeHttpClient { state_id: StateId, ids: Option<&[ValidatorId]>, statuses: Option<&[ValidatorStatus]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -458,7 +460,7 @@ impl BeaconNodeHttpClient { slot: Option, index: Option, epoch: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -491,7 +493,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result, Error> { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -516,7 +518,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, validator_id: &ValidatorId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -537,7 +539,7 @@ impl 
BeaconNodeHttpClient { &self, slot: Option, parent_root: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -564,7 +566,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_headers_block_id( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -633,7 +635,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let path = self.get_beacon_blocks_path(block_id)?; let response = match self.get_response(path, |b| b).await.optional()? { Some(res) => res, @@ -642,20 +644,31 @@ impl BeaconNodeHttpClient { // If present, use the fork provided in the headers to decode the block. Gracefully handle // missing and malformed fork names by falling back to regular deserialisation. - let (block, version) = match response.fork_name_from_header() { + let (block, version, execution_optimistic) = match response.fork_name_from_header() { Ok(Some(fork_name)) => { - map_fork_name_with!(fork_name, SignedBeaconBlock, { - let ForkVersionedResponse { version, data } = response.json().await?; - (data, version) - }) + let (data, (version, execution_optimistic)) = + map_fork_name_with!(fork_name, SignedBeaconBlock, { + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, (version, execution_optimistic)) + }); + (data, version, execution_optimistic) } Ok(None) | Err(_) => { - let ForkVersionedResponse { version, data } = response.json().await?; - (data, version) + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, version, execution_optimistic) } }; - Ok(Some(ForkVersionedResponse { + Ok(Some(ExecutionOptimisticForkVersionedResponse { version, + execution_optimistic, data: block, })) } @@ -700,7 
+713,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_root( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -719,7 +732,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_attestations( &self, block_id: BlockId, - ) -> Result>>>, Error> { + ) -> Result>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -907,7 +920,12 @@ impl BeaconNodeHttpClient { .push("validator") .push("contribution_and_proofs"); - self.post(path, &signed_contributions).await?; + self.post_with_timeout( + path, + &signed_contributions, + self.timeouts.sync_committee_contribution, + ) + .await?; Ok(()) } @@ -1116,7 +1134,7 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let path = self.get_debug_beacon_states_path(state_id)?; self.get_opt(path).await } @@ -1125,7 +1143,7 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states_v1( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1153,9 +1171,24 @@ impl BeaconNodeHttpClient { .transpose() } - /// `GET debug/beacon/heads` + /// `GET v2/debug/beacon/heads` pub async fn get_debug_beacon_heads( &self, + ) -> Result>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("debug") + .push("beacon") + .push("heads"); + + self.get(path).await + } + + /// `GET v1/debug/beacon/heads` (LEGACY) + pub async fn get_debug_beacon_heads_v1( + &self, ) -> Result>, Error> { let mut path = self.eth_path(V1)?; @@ -1247,7 +1280,7 @@ impl BeaconNodeHttpClient { .await } - /// `GET v2/validator/blocks/{slot}` + /// `GET v1/validator/blinded_blocks/{slot}` pub async fn get_validator_blinded_blocks_with_verify_randao< T: EthSpec, Payload: ExecPayload, @@ -1258,7 +1291,7 @@ impl BeaconNodeHttpClient { graffiti: Option<&Graffiti>, verify_randao: Option, ) -> Result>, Error> { - let mut path = self.eth_path(V2)?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1487,7 +1520,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, indices: &[u64], - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 5e02ec0bb2..abed4fe5e7 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -303,11 +303,11 @@ impl ValidatorClientHttpClient { } /// Perform a HTTP DELETE request. - async fn delete_with_unsigned_response( + async fn delete_with_raw_response( &self, url: U, body: &T, - ) -> Result { + ) -> Result { let response = self .client .delete(url) @@ -316,7 +316,16 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::Reqwest)?; - let response = ok_or_error(response).await?; + ok_or_error(response).await + } + + /// Perform a HTTP DELETE request. + async fn delete_with_unsigned_response( + &self, + url: U, + body: &T, + ) -> Result { + let response = self.delete_with_raw_response(url, body).await?; Ok(response.json().await?) 
} @@ -453,7 +462,9 @@ impl ValidatorClientHttpClient { pub async fn patch_lighthouse_validators( &self, voting_pubkey: &PublicKeyBytes, - enabled: bool, + enabled: Option, + gas_limit: Option, + builder_proposals: Option, ) -> Result<(), Error> { let mut path = self.server.full.clone(); @@ -463,7 +474,15 @@ .push("validators") .push(&voting_pubkey.to_string()); - self.patch(path, &ValidatorPatchRequest { enabled }).await + self.patch( + path, + &ValidatorPatchRequest { + enabled, + gas_limit, + builder_proposals, + }, + ) + .await } fn make_keystores_url(&self) -> Result { @@ -486,6 +505,18 @@ Ok(url) } + fn make_fee_recipient_url(&self, pubkey: &PublicKeyBytes) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("feerecipient"); + Ok(url) + } + /// `GET lighthouse/auth` pub async fn get_auth(&self) -> Result { let mut url = self.server.full.clone(); @@ -543,14 +574,44 @@ let url = self.make_remotekeys_url()?; self.delete_with_unsigned_response(url, req).await } + + /// `GET /eth/v1/validator/{pubkey}/feerecipient` + pub async fn get_fee_recipient( + &self, + pubkey: &PublicKeyBytes, + ) -> Result { + let url = self.make_fee_recipient_url(pubkey)?; + self.get(url) + .await + .map(|generic: GenericResponse| generic.data) + } + + /// `POST /eth/v1/validator/{pubkey}/feerecipient` + pub async fn post_fee_recipient( + &self, + pubkey: &PublicKeyBytes, + req: &UpdateFeeRecipientRequest, + ) -> Result { + let url = self.make_fee_recipient_url(pubkey)?; + self.post_with_raw_response(url, req).await + } + + /// `DELETE /eth/v1/validator/{pubkey}/feerecipient` + pub async fn delete_fee_recipient(&self, pubkey: &PublicKeyBytes) -> Result { + let url = self.make_fee_recipient_url(pubkey)?; + 
self.delete_with_raw_response(url, &()).await + } } -/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an -/// appropriate error message. +/// Returns `Ok(response)` if the response is a `200 OK` response or a +/// `202 Accepted` response. Otherwise, creates an appropriate error message. async fn ok_or_error(response: Response) -> Result { let status = response.status(); - if status == StatusCode::OK { + if status == StatusCode::OK + || status == StatusCode::ACCEPTED + || status == StatusCode::NO_CONTENT + { Ok(response) } else if let Ok(message) = response.json().await { Err(Error::ServerMessage(message)) diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index d9fe969138..62987c1368 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -2,7 +2,13 @@ use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; use slashing_protection::interchange::Interchange; -use types::PublicKeyBytes; +use types::{Address, PublicKeyBytes}; + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct GetFeeRecipientResponse { + pub pubkey: PublicKeyBytes, + pub ethaddress: Address, +} #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct AuthResponse { diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index fe9b6a48c0..d829c97cc7 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -26,6 +26,12 @@ pub struct ValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, #[serde(with = "eth2_serde_utils::quoted_u64")] pub deposit_gwei: u64, } @@ -49,6 +55,12 @@ pub struct CreatedValidator { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, pub eth1_deposit_tx_data: String, #[serde(with = "eth2_serde_utils::quoted_u64")] pub deposit_gwei: u64, @@ -62,7 +74,15 @@ pub struct PostValidatorsResponseData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorPatchRequest { - pub enabled: bool, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub enabled: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, } #[derive(Clone, PartialEq, Serialize, Deserialize)] @@ -70,8 +90,18 @@ pub struct KeystoreValidatorsPostRequest { pub password: ZeroizeString, pub enable: bool, pub keystore: Keystore, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub graffiti: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -84,6 +114,12 @@ pub struct Web3SignerValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, pub voting_public_key: PublicKey, pub url: String, #[serde(default)] @@ -97,3 +133,8 @@ pub struct Web3SignerValidatorRequest { #[serde(skip_serializing_if = "Option::is_none")] pub client_identity_password: Option, } + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct UpdateFeeRecipientRequest { + pub ethaddress: Address, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 8ef3582268..3e480e0827 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -189,6 +189,14 @@ impl fmt::Display for StateId { #[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] pub struct DutiesResponse { pub dependent_root: Hash256, + pub execution_optimistic: Option, + pub data: T, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] +pub struct ExecutionOptimisticResponse { + pub execution_optimistic: Option, pub data: T, } @@ -204,6 +212,18 @@ impl From for GenericResponse } } +impl GenericResponse { + pub fn add_execution_optimistic( + self, + execution_optimistic: bool, + ) -> ExecutionOptimisticResponse { + ExecutionOptimisticResponse { + execution_optimistic: Some(execution_optimistic), + data: self.data, + } + } +} + #[derive(Debug, PartialEq, Clone, Serialize)] #[serde(bound = "T: Serialize")] pub struct GenericResponseRef<'a, T: Serialize> { @@ -216,6 +236,14 @@ impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct ExecutionOptimisticForkVersionedResponse { + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + pub execution_optimistic: Option, + pub data: T, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub 
struct ForkVersionedResponse { #[serde(skip_serializing_if = "Option::is_none")] @@ -495,6 +523,8 @@ pub struct DepositContractData { pub struct ChainHeadData { pub slot: Slot, pub root: Hash256, + #[serde(skip_serializing_if = "Option::is_none")] + pub execution_optimistic: Option, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -522,6 +552,7 @@ pub struct VersionData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncingData { pub is_syncing: bool, + pub is_optimistic: bool, pub head_slot: Slot, pub sync_distance: Slot, } @@ -794,6 +825,7 @@ pub struct PeerCount { pub struct SseBlock { pub slot: Slot, pub block: Hash256, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -801,6 +833,7 @@ pub struct SseFinalizedCheckpoint { pub block: Hash256, pub state: Hash256, pub epoch: Epoch, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -811,6 +844,7 @@ pub struct SseHead { pub current_duty_dependent_root: Hash256, pub previous_duty_dependent_root: Hash256, pub epoch_transition: bool, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -823,6 +857,7 @@ pub struct SseChainReorg { pub new_head_block: Hash256, pub new_head_state: Hash256, pub epoch: Epoch, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -837,6 +872,7 @@ pub struct SseLateHead { pub observed_delay: Option, pub imported_delay: Option, pub set_as_head_delay: Option, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Clone)] diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs index 9cea725865..7e3c025a83 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -69,7 +69,7 @@ impl Eth2Config { #[derive(Copy, Clone, Debug, PartialEq)] pub struct Eth2NetArchiveAndDirectory<'a> { pub name: &'a str, - pub unique_id: 
&'a str, + pub config_dir: &'a str, pub genesis_is_known: bool, } @@ -81,7 +81,7 @@ impl<'a> Eth2NetArchiveAndDirectory<'a> { .parse::() .expect("should parse manifest dir as path") .join(PREDEFINED_NETWORKS_DIR) - .join(self.unique_id) + .join(self.config_dir) } pub fn genesis_state_archive(&self) -> PathBuf { @@ -96,6 +96,7 @@ const GENESIS_STATE_IS_KNOWN: bool = true; #[derive(Copy, Clone, Debug, PartialEq)] pub struct HardcodedNet { pub name: &'static str, + pub config_dir: &'static str, pub genesis_is_known: bool, pub config: &'static [u8], pub deploy_block: &'static [u8], @@ -108,15 +109,15 @@ pub struct HardcodedNet { /// It also defines a `include__file!` macro which provides a wrapper around /// `std::include_bytes`, allowing the inclusion of bytes from the specific testnet directory. macro_rules! define_archive { - ($name_ident: ident, $name_str: tt, $genesis_is_known: ident) => { + ($name_ident: ident, $config_dir: tt, $genesis_is_known: ident) => { paste! { #[macro_use] pub mod $name_ident { use super::*; pub const ETH2_NET_DIR: Eth2NetArchiveAndDirectory = Eth2NetArchiveAndDirectory { - name: $name_str, - unique_id: $name_str, + name: stringify!($name_ident), + config_dir: $config_dir, genesis_is_known: $genesis_is_known, }; @@ -130,7 +131,7 @@ macro_rules! define_archive { "/", $this_crate::predefined_networks_dir!(), "/", - $name_str, + $config_dir, "/", $filename )) @@ -149,6 +150,7 @@ macro_rules! define_net { $this_crate::HardcodedNet { name: ETH2_NET_DIR.name, + config_dir: ETH2_NET_DIR.config_dir, genesis_is_known: ETH2_NET_DIR.genesis_is_known, config: $this_crate::$include_file!($this_crate, "../", "config.yaml"), deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"), @@ -164,13 +166,13 @@ macro_rules! define_net { /// - `HARDCODED_NET_NAMES`: a list of the *names* of the networks defined by this macro. #[macro_export] macro_rules! 
define_nets { - ($this_crate: ident, $($name_ident: ident, $name_str: tt,)+) => { + ($this_crate: ident, $($name_ident: ident,)+) => { $this_crate::paste! { $( const [<$name_ident:upper>]: $this_crate::HardcodedNet = $this_crate::define_net!($this_crate, $name_ident, [<include_ $name_ident _file>]); )+ const HARDCODED_NETS: &[$this_crate::HardcodedNet] = &[$([<$name_ident:upper>],)+]; - pub const HARDCODED_NET_NAMES: &[&'static str] = &[$($name_str,)+]; + pub const HARDCODED_NET_NAMES: &[&'static str] = &[$(stringify!($name_ident),)+]; } }; } @@ -197,9 +199,9 @@ macro_rules! define_nets { /// `build.rs` which will unzip the genesis states. Then, that `eth2_network_configs` crate can /// perform the final step of using `std::include_bytes` to bake the files (bytes) into the binary. macro_rules! define_hardcoded_nets { - ($(($name_ident: ident, $name_str: tt, $genesis_is_known: ident)),+) => { + ($(($name_ident: ident, $config_dir: tt, $genesis_is_known: ident)),+) => { $( - define_archive!($name_ident, $name_str, $genesis_is_known); + define_archive!($name_ident, $config_dir, $genesis_is_known); )+ pub const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[$($name_ident::ETH2_NET_DIR,)+]; @@ -213,7 +215,7 @@ macro_rules! define_hardcoded_nets { #[macro_export] macro_rules! instantiate_hardcoded_nets { ($this_crate: ident) => { - $this_crate::define_nets!($this_crate, $($name_ident, $name_str,)+); + $this_crate::define_nets!($this_crate, $($name_ident,)+); } } }; @@ -234,10 +236,76 @@ macro_rules! define_hardcoded_nets { // // The directory containing the testnet files should match the human-friendly name (element 1). 
define_hardcoded_nets!( - (mainnet, "mainnet", GENESIS_STATE_IS_KNOWN), - (prater, "prater", GENESIS_STATE_IS_KNOWN), - (gnosis, "gnosis", GENESIS_STATE_IS_KNOWN), - (kiln, "kiln", GENESIS_STATE_IS_KNOWN), - (ropsten, "ropsten", GENESIS_STATE_IS_KNOWN), - (sepolia, "sepolia", GENESIS_STATE_IS_KNOWN) + ( + // Network name (must be unique among all networks). + mainnet, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "mainnet", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + prater, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "prater", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + goerli, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + // + // The Goerli network is effectively an alias to Prater. + "prater", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + gnosis, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "gnosis", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). 
+ kiln, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "kiln", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + ropsten, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "ropsten", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + sepolia, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "sepolia", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. 
+ GENESIS_STATE_IS_KNOWN + ) ); diff --git a/common/eth2_network_config/built_in_network_configs/kiln/config.yaml b/common/eth2_network_config/built_in_network_configs/kiln/config.yaml index 797c0672c3..5631c8a0bf 100644 --- a/common/eth2_network_config/built_in_network_configs/kiln/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/kiln/config.yaml @@ -6,7 +6,7 @@ PRESET_BASE: 'mainnet' MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 95000 # Mar 11th, 2022, 14:00 UTC MIN_GENESIS_TIME: 1647007200 -# Gensis fork +# Genesis fork GENESIS_FORK_VERSION: 0x70000069 # 300 seconds (5 min) GENESIS_DELAY: 300 diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index d337c4120a..d173be20de 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -6,8 +6,7 @@ PRESET_BASE: 'mainnet' # Transition # --------------------------------------------------------------- -# TBD, 2**256-2**10 is a placeholder -TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +TERMINAL_TOTAL_DIFFICULTY: 10790000 # By default, don't use these params TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 @@ -35,7 +34,7 @@ ALTAIR_FORK_VERSION: 0x01001020 ALTAIR_FORK_EPOCH: 36660 # Merge BELLATRIX_FORK_VERSION: 0x02001020 -BELLATRIX_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_EPOCH: 112260 # Sharding SHARDING_FORK_VERSION: 0x03001020 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 8df54a5a8b..2bfd003266 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -256,6 +256,13 @@ mod tests { 
config.beacon_state::<E>().expect("beacon state can decode"); } + #[test] + fn prater_and_goerli_are_equal() { + let goerli = Eth2NetworkConfig::from_hardcoded_net(&GOERLI).unwrap(); + let prater = Eth2NetworkConfig::from_hardcoded_net(&PRATER).unwrap(); + assert_eq!(goerli, prater); + } + #[test] fn hard_coded_nets_work() { for net in HARDCODED_NETS { @@ -275,7 +282,7 @@ mod tests { "{:?}", net.name ); - assert_eq!(config.config.config_name, Some(net.name.to_string())); + assert_eq!(config.config.config_name, Some(net.config_dir.to_string())); } } diff --git a/common/fallback/src/lib.rs b/common/fallback/src/lib.rs index d91de09be0..70f327d204 100644 --- a/common/fallback/src/lib.rs +++ b/common/fallback/src/lib.rs @@ -45,7 +45,7 @@ impl<T> Fallback<T> { { match error { FallbackError::AllErrored(v) => format!( - "All fallback errored: {}", + "All fallbacks errored: {}", join( zip(self.servers.iter().map(f), v.iter()) .map(|(server, error)| format!("{} => {:?}", server, error)), diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index e4a6bd0179..7ba1afac60 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.3.2-rc.0-", - fallback = "Lighthouse/v2.3.2-rc.0" + prefix = "Lighthouse/v2.5.0-", + fallback = "Lighthouse/v2.5.0" ); /// Returns `VERSION`, but with platform information appended to the end. 
diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs index f5ce1156e5..cf3d11af8d 100644 --- a/common/warp_utils/src/reject.rs +++ b/common/warp_utils/src/reject.rs @@ -205,8 +205,13 @@ pub async fn handle_rejection(err: warp::Rejection) -> Result<impl warp::Reply, code = StatusCode::FORBIDDEN; message = format!("FORBIDDEN: Invalid auth token: {}", e.0); } else if let Some(e) = err.find::<warp::reject::MissingHeader>() { - code = StatusCode::BAD_REQUEST; - message = format!("BAD_REQUEST: missing {} header", e.name()); + if e.name().eq("Authorization") { + code = StatusCode::UNAUTHORIZED; + message = "UNAUTHORIZED: missing Authorization header".to_string(); + } else { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: missing {} header", e.name()); + } } else if let Some(e) = err.find::<warp::reject::InvalidHeader>() { code = StatusCode::BAD_REQUEST; message = format!("BAD_REQUEST: invalid {} header", e.name()); diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 429ab1b8c5..b2570092e6 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -8,6 +8,7 @@ edition = "2021" [dependencies] types = { path = "../types" } +state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 7390ce7f94..c8d119a99b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,18 +1,23 @@ use crate::{ForkChoiceStore, InvalidationOperation}; use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice}; use ssz_derive::{Decode, Encode}; +use state_processing::{ + per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, +}; use std::cmp::Ordering; +use std::collections::BTreeSet; use std::marker::PhantomData; use 
std::time::Duration; use types::{ - consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlockRef, BeaconState, - BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, + consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, AttesterSlashing, BeaconBlockRef, + BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, + ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; #[derive(Debug)] pub enum Error<T> { InvalidAttestation(InvalidAttestation), + InvalidAttesterSlashing(AttesterSlashingValidationError), InvalidBlock(InvalidBlock), ProtoArrayError(String), InvalidProtoArrayBytes(String), @@ -51,6 +56,9 @@ pub enum Error<T> { MissingFinalizedBlock { finalized_checkpoint: Checkpoint, }, + UnrealizedVoteProcessing(state_processing::EpochProcessingError), + ParticipationCacheBuild(BeaconStateError), + ValidatorStatuses(BeaconStateError), } impl<T> From<InvalidAttestation> for Error<T> { @@ -59,6 +67,18 @@ impl<T> From<InvalidAttestation> for Error<T> { } } +impl<T> From<AttesterSlashingValidationError> for Error<T> { + fn from(e: AttesterSlashingValidationError) -> Self { + Error::InvalidAttesterSlashing(e) + } +} + +impl<T> From<state_processing::EpochProcessingError> for Error<T> { + fn from(e: state_processing::EpochProcessingError) -> Self { + Error::UnrealizedVoteProcessing(e) + } +} + #[derive(Debug)] pub enum InvalidBlock { UnknownParent(Hash256), @@ -114,6 +134,66 @@ impl<T> From<String> for Error<T> { } } +/// Indicates whether the unrealized justification of a block should be calculated and tracked. +/// If a block has been finalized, this can be set to false. This is useful when syncing finalized +/// portions of the chain. Otherwise this should always be set to true. 
+#[derive(Clone, Copy, Debug, PartialEq)] +pub enum CountUnrealized { + True, + False, +} + +impl CountUnrealized { + pub fn is_true(&self) -> bool { + matches!(self, CountUnrealized::True) + } + + pub fn and(&self, other: CountUnrealized) -> CountUnrealized { + if self.is_true() && other.is_true() { + CountUnrealized::True + } else { + CountUnrealized::False + } + } +} + +impl From<bool> for CountUnrealized { + fn from(count_unrealized: bool) -> Self { + if count_unrealized { + CountUnrealized::True + } else { + CountUnrealized::False + } + } +} + +#[derive(Copy, Clone)] +enum UpdateJustifiedCheckpointSlots { + OnTick { + current_slot: Slot, + }, + OnBlock { + state_slot: Slot, + current_slot: Slot, + }, +} + +impl UpdateJustifiedCheckpointSlots { + fn current_slot(&self) -> Slot { + match self { + UpdateJustifiedCheckpointSlots::OnTick { current_slot } => *current_slot, + UpdateJustifiedCheckpointSlots::OnBlock { current_slot, .. } => *current_slot, + } + } + + fn state_slot(&self) -> Option<Slot> { + match self { + UpdateJustifiedCheckpointSlots::OnTick { .. } => None, + UpdateJustifiedCheckpointSlots::OnBlock { state_slot, .. } => Some(*state_slot), + } + } +} + /// Indicates if a block has been verified by an execution payload. /// /// There is no variant for "invalid", since such a block should never be added to fork choice. @@ -162,51 +242,6 @@ fn compute_start_slot_at_epoch<E: EthSpec>(epoch: Epoch) -> Slot { epoch.start_slot(E::slots_per_epoch()) } -/// Called whenever the current time increases. -/// -/// ## Specification -/// -/// Equivalent to: -/// -/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick -fn on_tick<T, E>(store: &mut T, time: Slot) -> Result<(), Error<T::Error>> -where - T: ForkChoiceStore<E>, - E: EthSpec, -{ - let previous_slot = store.get_current_slot(); - - if time > previous_slot + 1 { - return Err(Error::InconsistentOnTick { - previous_slot, - time, - }); - } - - // Update store time. 
- store.set_current_slot(time); - - let current_slot = store.get_current_slot(); - - // Reset proposer boost if this is a new slot. - if current_slot > previous_slot { - store.set_proposer_boost_root(Hash256::zero()); - } - - // Not a new epoch, return. - if !(current_slot > previous_slot && compute_slots_since_epoch_start::<E>(current_slot) == 0) { - return Ok(()); - } - - if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { - store - .set_justified_checkpoint(*store.best_justified_checkpoint()) - .map_err(Error::ForkChoiceStoreError)?; - } - - Ok(()) -} - /// Used for queuing attestations from the current slot. Only contains the minimum necessary /// information about the attestation. #[derive(Clone, PartialEq, Encode, Decode)] @@ -259,6 +294,7 @@ pub enum AttestationFromBlock { pub struct ForkchoiceUpdateParameters { pub head_root: Hash256, pub head_hash: Option<ExecutionBlockHash>, + pub justified_hash: Option<ExecutionBlockHash>, pub finalized_hash: Option<ExecutionBlockHash>, } @@ -355,7 +391,7 @@ where // If the current slot is not provided, use the value that was last provided to the store. let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); - let proto_array = ProtoArrayForkChoice::new( + let proto_array = ProtoArrayForkChoice::new::<E>( finalized_block_slot, finalized_block_state_root, *fc_store.justified_checkpoint(), @@ -372,6 +408,7 @@ where // This will be updated during the next call to `Self::get_head`. forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, + justified_hash: None, finalized_hash: None, head_root: Hash256::zero(), }, @@ -386,26 +423,6 @@ where Ok(fork_choice) } - /* - /// Instantiates `Self` from some existing components. - /// - /// This is useful if the existing components have been loaded from disk after a process - /// restart. 
- pub fn from_components( - fc_store: T, - proto_array: ProtoArrayForkChoice, - queued_attestations: Vec<QueuedAttestation>, - ) -> Self { - Self { - fc_store, - proto_array, - queued_attestations, - forkchoice_update_parameters: None, - _phantom: PhantomData, - } - } - */ - /// Returns cached information that can be used to issue a `forkchoiceUpdated` message to an /// execution engine. /// @@ -468,10 +485,13 @@ where /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_head pub fn get_head( &mut self, - current_slot: Slot, + system_time_current_slot: Slot, spec: &ChainSpec, ) -> Result<Hash256, Error<T::Error>> { - self.update_time(current_slot)?; + // Provide the slot (as per the system clock) to the `fc_store` and then return its view of + // the current slot. The `fc_store` will ensure that the `current_slot` is never + // decreasing, a property which we must maintain. + let current_slot = self.update_time(system_time_current_slot, spec)?; let store = &mut self.fc_store; @@ -480,6 +500,8 @@ where *store.finalized_checkpoint(), store.justified_balances(), store.proposer_boost_root(), + store.equivocating_indices(), + current_slot, spec, )?; @@ -489,13 +511,18 @@ where let head_hash = self .get_block(&head_root) .and_then(|b| b.execution_status.block_hash()); + let justified_root = self.justified_checkpoint().root; let finalized_root = self.finalized_checkpoint().root; + let justified_hash = self + .get_block(&justified_root) + .and_then(|b| b.execution_status.block_hash()); let finalized_hash = self .get_block(&finalized_root) .and_then(|b| b.execution_status.block_hash()); self.forkchoice_update_parameters = ForkchoiceUpdateParameters { head_root, head_hash, + justified_hash, finalized_hash, }; @@ -532,13 +559,11 @@ where /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#should_update_justified_checkpoint fn should_update_justified_checkpoint( &mut self, - current_slot: Slot, - state: 
&BeaconState<E>, + new_justified_checkpoint: Checkpoint, + slots: UpdateJustifiedCheckpointSlots, spec: &ChainSpec, ) -> Result<bool, Error<T::Error>> { - self.update_time(current_slot)?; - - let new_justified_checkpoint = &state.current_justified_checkpoint(); + self.update_time(slots.current_slot(), spec)?; if compute_slots_since_epoch_start::<E>(self.fc_store.get_current_slot()) < spec.safe_slots_to_update_justified @@ -550,11 +575,13 @@ where compute_start_slot_at_epoch::<E>(self.fc_store.justified_checkpoint().epoch); // This sanity check is not in the spec, but the invariant is implied. - if justified_slot >= state.slot() { - return Err(Error::AttemptToRevertJustification { - store: justified_slot, - state: state.slot(), - }); + if let Some(state_slot) = slots.state_slot() { + if justified_slot >= state_slot { + return Err(Error::AttemptToRevertJustification { + store: justified_slot, + state: state_slot, + }); + } } // We know that the slot for `new_justified_checkpoint.root` is not greater than @@ -615,22 +642,25 @@ where #[allow(clippy::too_many_arguments)] pub fn on_block<Payload: ExecPayload<E>>( &mut self, - current_slot: Slot, + system_time_current_slot: Slot, block: BeaconBlockRef<E, Payload>, block_root: Hash256, block_delay: Duration, state: &BeaconState<E>, payload_verification_status: PayloadVerificationStatus, spec: &ChainSpec, + count_unrealized: CountUnrealized, ) -> Result<(), Error<T::Error>> { - let current_slot = self.update_time(current_slot)?; + // Provide the slot (as per the system clock) to the `fc_store` and then return its view of + // the current slot. The `fc_store` will ensure that the `current_slot` is never + // decreasing, a property which we must maintain. + let current_slot = self.update_time(system_time_current_slot, spec)?; // Parent block must be known. 
- if !self.proto_array.contains_block(&block.parent_root()) { - return Err(Error::InvalidBlock(InvalidBlock::UnknownParent( - block.parent_root(), - ))); - } + let parent_block = self + .proto_array + .get_block(&block.parent_root()) + .ok_or_else(|| Error::InvalidBlock(InvalidBlock::UnknownParent(block.parent_root())))?; // Blocks cannot be in the future. If they are, their consideration must be delayed until // the are in the past. @@ -679,29 +709,110 @@ where self.fc_store.set_proposer_boost_root(block_root); } - // Update justified checkpoint. - if state.current_justified_checkpoint().epoch > self.fc_store.justified_checkpoint().epoch { - if state.current_justified_checkpoint().epoch - > self.fc_store.best_justified_checkpoint().epoch + let update_justified_checkpoint_slots = UpdateJustifiedCheckpointSlots::OnBlock { + state_slot: state.slot(), + current_slot, + }; + + // Update store with checkpoints if necessary + self.update_checkpoints( + state.current_justified_checkpoint(), + state.finalized_checkpoint(), + update_justified_checkpoint_slots, + spec, + )?; + + // Update unrealized justified/finalized checkpoints. + let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if count_unrealized + .is_true() + { + let block_epoch = block.slot().epoch(E::slots_per_epoch()); + + // If the parent checkpoints are already at the same epoch as the block being imported, + // it's impossible for the unrealized checkpoints to differ from the parent's. This + // holds true because: + // + // 1. A child block cannot have lower FFG checkpoints than its parent. + // 2. A block in epoch `N` cannot contain attestations which would justify an epoch higher than `N`. + // 3. A block in epoch `N` cannot contain attestations which would finalize an epoch higher than `N - 1`. + // + // This is an optimization. It should reduce the amount of times we run + // `process_justification_and_finalization` by approximately 1/3rd when the chain is + // performing optimally. 
+ let parent_checkpoints = parent_block + .unrealized_justified_checkpoint + .zip(parent_block.unrealized_finalized_checkpoint) + .filter(|(parent_justified, parent_finalized)| { + parent_justified.epoch == block_epoch + && parent_finalized.epoch + 1 >= block_epoch + }); + + let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = + if let Some((parent_justified, parent_finalized)) = parent_checkpoints { + (parent_justified, parent_finalized) + } else { + let justification_and_finalization_state = match block { + BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { + let participation_cache = + per_epoch_processing::altair::ParticipationCache::new(state, spec) + .map_err(Error::ParticipationCacheBuild)?; + per_epoch_processing::altair::process_justification_and_finalization( + state, + &participation_cache, + )? + } + BeaconBlockRef::Base(_) => { + let mut validator_statuses = + per_epoch_processing::base::ValidatorStatuses::new(state, spec) + .map_err(Error::ValidatorStatuses)?; + validator_statuses + .process_attestations(state) + .map_err(Error::ValidatorStatuses)?; + per_epoch_processing::base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )? + } + }; + + ( + justification_and_finalization_state.current_justified_checkpoint(), + justification_and_finalization_state.finalized_checkpoint(), + ) + }; + + // Update best known unrealized justified & finalized checkpoints + if unrealized_justified_checkpoint.epoch + > self.fc_store.unrealized_justified_checkpoint().epoch { self.fc_store - .set_best_justified_checkpoint(state.current_justified_checkpoint()); + .set_unrealized_justified_checkpoint(unrealized_justified_checkpoint); } - if self.should_update_justified_checkpoint(current_slot, state, spec)? 
{ + if unrealized_finalized_checkpoint.epoch + > self.fc_store.unrealized_finalized_checkpoint().epoch + { self.fc_store - .set_justified_checkpoint(state.current_justified_checkpoint()) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; + .set_unrealized_finalized_checkpoint(unrealized_finalized_checkpoint); } - } - // Update finalized checkpoint. - if state.finalized_checkpoint().epoch > self.fc_store.finalized_checkpoint().epoch { - self.fc_store - .set_finalized_checkpoint(state.finalized_checkpoint()); - self.fc_store - .set_justified_checkpoint(state.current_justified_checkpoint()) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; - } + // If block is from past epochs, try to update store's justified & finalized checkpoints right away + if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { + self.update_checkpoints( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + update_justified_checkpoint_slots, + spec, + )?; + } + + ( + Some(unrealized_justified_checkpoint), + Some(unrealized_finalized_checkpoint), + ) + } else { + (None, None) + }; let target_slot = block .slot() @@ -750,32 +861,68 @@ where // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it doesn't have any votes. 
- self.proto_array.process_block(ProtoBlock { - slot: block.slot(), - root: block_root, - parent_root: Some(block.parent_root()), - target_root, - current_epoch_shuffling_id: AttestationShufflingId::new( - block_root, - state, - RelativeEpoch::Current, - ) - .map_err(Error::BeaconStateError)?, - next_epoch_shuffling_id: AttestationShufflingId::new( - block_root, - state, - RelativeEpoch::Next, - ) - .map_err(Error::BeaconStateError)?, - state_root: block.state_root(), - justified_checkpoint: state.current_justified_checkpoint(), - finalized_checkpoint: state.finalized_checkpoint(), - execution_status, - })?; + self.proto_array.process_block::<E>( + ProtoBlock { + slot: block.slot(), + root: block_root, + parent_root: Some(block.parent_root()), + target_root, + current_epoch_shuffling_id: AttestationShufflingId::new( + block_root, + state, + RelativeEpoch::Current, + ) + .map_err(Error::BeaconStateError)?, + next_epoch_shuffling_id: AttestationShufflingId::new( + block_root, + state, + RelativeEpoch::Next, + ) + .map_err(Error::BeaconStateError)?, + state_root: block.state_root(), + justified_checkpoint: state.current_justified_checkpoint(), + finalized_checkpoint: state.finalized_checkpoint(), + execution_status, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + }, + current_slot, + )?; Ok(()) } + /// Update checkpoints in store if necessary + fn update_checkpoints( + &mut self, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + slots: UpdateJustifiedCheckpointSlots, + spec: &ChainSpec, + ) -> Result<(), Error<T::Error>> { + // Update justified checkpoint. + if justified_checkpoint.epoch > self.fc_store.justified_checkpoint().epoch { + if justified_checkpoint.epoch > self.fc_store.best_justified_checkpoint().epoch { + self.fc_store + .set_best_justified_checkpoint(justified_checkpoint); + } + if self.should_update_justified_checkpoint(justified_checkpoint, slots, spec)? 
{ + self.fc_store + .set_justified_checkpoint(justified_checkpoint) + .map_err(Error::UnableToSetJustifiedCheckpoint)?; + } + } + + // Update finalized checkpoint. + if finalized_checkpoint.epoch > self.fc_store.finalized_checkpoint().epoch { + self.fc_store.set_finalized_checkpoint(finalized_checkpoint); + self.fc_store + .set_justified_checkpoint(justified_checkpoint) + .map_err(Error::UnableToSetJustifiedCheckpoint)?; + } + Ok(()) + } + /// Validates the `epoch` against the current time according to the fork choice store. /// /// ## Specification @@ -910,12 +1057,12 @@ where /// will not be run here. pub fn on_attestation( &mut self, - current_slot: Slot, + system_time_current_slot: Slot, attestation: &IndexedAttestation<E>, is_from_block: AttestationFromBlock, + spec: &ChainSpec, ) -> Result<(), Error<T::Error>> { - // Ensure the store is up-to-date. - self.update_time(current_slot)?; + self.update_time(system_time_current_slot, spec)?; // Ignore any attestations to the zero hash. // @@ -958,14 +1105,34 @@ where Ok(()) } + /// Apply an attester slashing to fork choice. + /// + /// We assume that the attester slashing provided to this function has already been verified. + pub fn on_attester_slashing(&mut self, slashing: &AttesterSlashing<E>) { + let attesting_indices_set = |att: &IndexedAttestation<E>| { + att.attesting_indices + .iter() + .copied() + .collect::<BTreeSet<_>>() + }; + let att1_indices = attesting_indices_set(&slashing.attestation_1); + let att2_indices = attesting_indices_set(&slashing.attestation_2); + self.fc_store + .extend_equivocating_indices(att1_indices.intersection(&att2_indices).copied()); + } + /// Call `on_tick` for all slots between `fc_store.get_current_slot()` and the provided /// `current_slot`. Returns the value of `self.fc_store.get_current_slot`. 
- pub fn update_time(&mut self, current_slot: Slot) -> Result<Slot, Error<T::Error>> { + pub fn update_time( + &mut self, + current_slot: Slot, + spec: &ChainSpec, + ) -> Result<Slot, Error<T::Error>> { while self.fc_store.get_current_slot() < current_slot { let previous_slot = self.fc_store.get_current_slot(); // Note: we are relying upon `on_tick` to update `fc_store.time` to ensure we don't // get stuck in a loop. - on_tick(&mut self.fc_store, previous_slot + 1)? + self.on_tick(previous_slot + 1, spec)? } // Process any attestations that might now be eligible. @@ -974,6 +1141,63 @@ where Ok(self.fc_store.get_current_slot()) } + /// Called whenever the current time increases. + /// + /// ## Specification + /// + /// Equivalent to: + /// + /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick + fn on_tick(&mut self, time: Slot, spec: &ChainSpec) -> Result<(), Error<T::Error>> { + let store = &mut self.fc_store; + let previous_slot = store.get_current_slot(); + + if time > previous_slot + 1 { + return Err(Error::InconsistentOnTick { + previous_slot, + time, + }); + } + + // Update store time. + store.set_current_slot(time); + + let current_slot = store.get_current_slot(); + + // Reset proposer boost if this is a new slot. + if current_slot > previous_slot { + store.set_proposer_boost_root(Hash256::zero()); + } + + // Not a new epoch, return. 
+ if !(current_slot > previous_slot + && compute_slots_since_epoch_start::<E>(current_slot) == 0) + { + return Ok(()); + } + + if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { + let store = &self.fc_store; + if self.is_descendant_of_finalized(store.best_justified_checkpoint().root) { + let store = &mut self.fc_store; + store + .set_justified_checkpoint(*store.best_justified_checkpoint()) + .map_err(Error::ForkChoiceStoreError)?; + } + } + + // Update store.justified_checkpoint if a better unrealized justified checkpoint is known + let unrealized_justified_checkpoint = *self.fc_store.unrealized_justified_checkpoint(); + let unrealized_finalized_checkpoint = *self.fc_store.unrealized_finalized_checkpoint(); + self.update_checkpoints( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + UpdateJustifiedCheckpointSlots::OnTick { current_slot }, + spec, + )?; + Ok(()) + } + /// Processes and removes from the queue any queued attestations which may now be eligible for /// processing due to the slot clock incrementing. fn process_attestation_queue(&mut self) -> Result<(), Error<T::Error>> { @@ -1050,34 +1274,40 @@ where .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) } - /// Returns `Ok(true)` if `block_root` has been imported optimistically. That is, the - /// execution payload has not been verified. + /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid. /// - /// Returns `Ok(false)` if `block_root`'s execution payload has been verfied, if it is a - /// pre-Bellatrix block or if it is before the PoW terminal block. + /// Returns `Ok(false)` if `block_root`'s execution payload has been elected as fully VALID, if + /// it is a pre-Bellatrix block or if it is before the PoW terminal block. /// /// In the case where the block could not be found in fork-choice, it returns the /// `execution_status` of the current finalized block. 
/// /// This function assumes the `block_root` exists. - pub fn is_optimistic_block(&self, block_root: &Hash256) -> Result<bool, Error<T::Error>> { + pub fn is_optimistic_or_invalid_block( + &self, + block_root: &Hash256, + ) -> Result<bool, Error<T::Error>> { if let Some(status) = self.get_block_execution_status(block_root) { - Ok(status.is_optimistic()) + Ok(status.is_optimistic_or_invalid()) } else { - Ok(self.get_finalized_block()?.execution_status.is_optimistic()) + Ok(self + .get_finalized_block()? + .execution_status + .is_optimistic_or_invalid()) } } /// The same as `is_optimistic_block` but does not fallback to `self.get_finalized_block` /// when the block cannot be found. /// - /// Intended to be used when checking if the head has been imported optimistically. - pub fn is_optimistic_block_no_fallback( + /// Intended to be used when checking if the head has been imported optimistically or is + /// invalid. + pub fn is_optimistic_or_invalid_block_no_fallback( &self, block_root: &Hash256, ) -> Result<bool, Error<T::Error>> { if let Some(status) = self.get_block_execution_status(block_root) { - Ok(status.is_optimistic()) + Ok(status.is_optimistic_or_invalid()) } else { Err(Error::MissingProtoArrayBlock(*block_root)) } @@ -1113,8 +1343,6 @@ where // If the parent block has execution enabled, always import the block. // - // TODO(bellatrix): this condition has not yet been merged into the spec. - // // See: // // https://github.com/ethereum/consensus-specs/pull/2844 @@ -1151,6 +1379,14 @@ where *self.fc_store.best_justified_checkpoint() } + pub fn unrealized_justified_checkpoint(&self) -> Checkpoint { + *self.fc_store.unrealized_justified_checkpoint() + } + + pub fn unrealized_finalized_checkpoint(&self) -> Checkpoint { + *self.fc_store.unrealized_finalized_checkpoint() + } + /// Returns the latest message for a given validator, if any. /// /// Returns `(block_root, block_slot)`. 
@@ -1168,6 +1404,12 @@ where &self.proto_array } + /// Returns a mutable reference to `proto_array`. + /// Should only be used in testing. + pub fn proto_array_mut(&mut self) -> &mut ProtoArrayForkChoice { + &mut self.proto_array + } + /// Returns a reference to the underlying `fc_store`. pub fn fc_store(&self) -> &T { &self.fc_store @@ -1211,6 +1453,7 @@ where // Will be updated in the following call to `Self::get_head`. forkchoice_update_parameters: ForkchoiceUpdateParameters { head_hash: None, + justified_hash: None, finalized_hash: None, head_root: Hash256::zero(), }, @@ -1219,7 +1462,17 @@ where _phantom: PhantomData, }; - fork_choice.get_head(current_slot, spec)?; + // If a call to `get_head` fails, the only known cause is because the only head with viable + // FFG properties is has an invalid payload. In this scenario, set all the payloads back to + // an optimistic status so that we can have a head to start from. + if fork_choice.get_head(current_slot, spec).is_err() { + fork_choice + .proto_array + .set_all_blocks_to_optimistic::<E>(spec)?; + // If the second attempt at finding a head fails, return an error since we do not + // expect this scenario. + fork_choice.get_head(current_slot, spec)?; + } Ok(fork_choice) } diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 6df0cbc2c2..6a4616e9f3 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeSet; use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": @@ -50,6 +51,12 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized { /// Returns the `finalized_checkpoint`. fn finalized_checkpoint(&self) -> &Checkpoint; + /// Returns the `unrealized_justified_checkpoint`. 
+ fn unrealized_justified_checkpoint(&self) -> &Checkpoint; + + /// Returns the `unrealized_finalized_checkpoint`. + fn unrealized_finalized_checkpoint(&self) -> &Checkpoint; + /// Returns the `proposer_boost_root`. fn proposer_boost_root(&self) -> Hash256; @@ -62,6 +69,18 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized { /// Sets the `best_justified_checkpoint`. fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint); + /// Sets the `unrealized_justified_checkpoint`. + fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint); + + /// Sets the `unrealized_finalized_checkpoint`. + fn set_unrealized_finalized_checkpoint(&mut self, checkpoint: Checkpoint); + /// Sets the proposer boost root. fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256); + + /// Gets the equivocating indices. + fn equivocating_indices(&self) -> &BTreeSet<u64>; + + /// Adds to the set of equivocating indices. + fn extend_equivocating_indices(&mut self, indices: impl IntoIterator<Item = u64>); } diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 6f79b488dd..6cb2010f1a 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -2,9 +2,9 @@ mod fork_choice; mod fork_choice_store; pub use crate::fork_choice::{ - AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, - InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, - QueuedAttestation, + AttestationFromBlock, CountUnrealized, Error, ForkChoice, ForkChoiceView, + ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, + PersistedForkChoice, QueuedAttestation, }; pub use fork_choice_store::ForkChoiceStore; pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 2d10319cf0..850f7c4a12 100644 --- 
a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -12,7 +12,8 @@ use beacon_chain::{ StateSkipConfig, WhenSlotSkipped, }; use fork_choice::{ - ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, + CountUnrealized, ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, + QueuedAttestation, }; use store::MemoryStore; use types::{ @@ -150,7 +151,7 @@ impl ForkChoiceTest { .chain .canonical_head .fork_choice_write_lock() - .update_time(self.harness.chain.slot().unwrap()) + .update_time(self.harness.chain.slot().unwrap(), &self.harness.spec) .unwrap(); func( self.harness @@ -292,6 +293,7 @@ impl ForkChoiceTest { &state, PayloadVerificationStatus::Verified, &self.harness.chain.spec, + CountUnrealized::True, ) .unwrap(); self @@ -334,6 +336,7 @@ impl ForkChoiceTest { &state, PayloadVerificationStatus::Verified, &self.harness.chain.spec, + CountUnrealized::True, ) .err() .expect("on_block did not return an error"); diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 79b4cb2d80..826bf6c3a7 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -1,4 +1,4 @@ -use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256}; +use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256, Slot}; #[derive(Clone, PartialEq, Debug)] pub enum Error { @@ -52,6 +52,7 @@ pub enum Error { #[derive(Clone, PartialEq, Debug)] pub struct InvalidBestNodeInfo { + pub current_slot: Slot, pub start_root: Hash256, pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 2be46cc590..fcb1b94d6f 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -6,6 +6,7 @@ mod votes; use 
crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::InvalidationOperation; use serde_derive::{Deserialize, Serialize}; +use std::collections::BTreeSet; use types::{ AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, Slot, @@ -78,7 +79,7 @@ impl ForkChoiceTestDefinition { let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); - let mut fork_choice = ProtoArrayForkChoice::new( + let mut fork_choice = ProtoArrayForkChoice::new::<MainnetEthSpec>( self.finalized_block_slot, Hash256::zero(), self.justified_checkpoint, @@ -88,6 +89,7 @@ impl ForkChoiceTestDefinition { ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), ) .expect("should create fork choice struct"); + let equivocating_indices = BTreeSet::new(); for (op_index, op) in self.operations.into_iter().enumerate() { match op.clone() { @@ -103,6 +105,8 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), + &equivocating_indices, + Slot::new(0), &spec, ) .unwrap_or_else(|e| { @@ -129,6 +133,8 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, proposer_boost_root, + &equivocating_indices, + Slot::new(0), &spec, ) .unwrap_or_else(|e| { @@ -152,6 +158,8 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), + &equivocating_indices, + Slot::new(0), &spec, ); @@ -190,13 +198,17 @@ impl ForkChoiceTestDefinition { execution_status: ExecutionStatus::Optimistic( ExecutionBlockHash::from_root(root), ), + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, }; - fork_choice.process_block(block).unwrap_or_else(|e| { - panic!( - "process_block op at index {} returned error: {:?}", - op_index, e - ) - }); + fork_choice + .process_block::<MainnetEthSpec>(block, slot) + .unwrap_or_else(|e| { + panic!( + "process_block op at index {} returned error: 
{:?}", + op_index, e + ) + }); check_bytes_round_trip(&fork_choice); } Operation::ProcessAttestation { diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 22d457ca3e..390eb902a7 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -97,6 +97,10 @@ pub struct ProtoNode { /// Indicates if an execution node has marked this block as valid. Also contains the execution /// block hash. pub execution_status: ExecutionStatus, + #[ssz(with = "four_byte_option_checkpoint")] + pub unrealized_justified_checkpoint: Option<Checkpoint>, + #[ssz(with = "four_byte_option_checkpoint")] + pub unrealized_finalized_checkpoint: Option<Checkpoint>, } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -140,6 +144,7 @@ impl ProtoArray { /// - Compare the current node with the parents best-child, updating it if the current node /// should become the best child. /// - If required, update the parents best-descendant with the current node or its best-descendant. + #[allow(clippy::too_many_arguments)] pub fn apply_score_changes<E: EthSpec>( &mut self, mut deltas: Vec<i64>, @@ -147,6 +152,7 @@ impl ProtoArray { finalized_checkpoint: Checkpoint, new_balances: &[u64], proposer_boost_root: Hash256, + current_slot: Slot, spec: &ChainSpec, ) -> Result<(), Error> { if deltas.len() != self.indices.len() { @@ -280,7 +286,11 @@ impl ProtoArray { // If the node has a parent, try to update its best-child and best-descendant. if let Some(parent_index) = node.parent { - self.maybe_update_best_child_and_descendant(parent_index, node_index)?; + self.maybe_update_best_child_and_descendant::<E>( + parent_index, + node_index, + current_slot, + )?; } } @@ -290,7 +300,7 @@ impl ProtoArray { /// Register a block with the fork choice. /// /// It is only sane to supply a `None` parent for the genesis block. 
- pub fn on_block(&mut self, block: Block) -> Result<(), Error> { + pub fn on_block<E: EthSpec>(&mut self, block: Block, current_slot: Slot) -> Result<(), Error> { // If the block is already known, simply ignore it. if self.indices.contains_key(&block.root) { return Ok(()); @@ -314,6 +324,8 @@ impl ProtoArray { best_child: None, best_descendant: None, execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, }; // If the parent has an invalid execution status, return an error before adding the block to @@ -335,7 +347,11 @@ impl ProtoArray { self.nodes.push(node.clone()); if let Some(parent_index) = node.parent { - self.maybe_update_best_child_and_descendant(parent_index, node_index)?; + self.maybe_update_best_child_and_descendant::<E>( + parent_index, + node_index, + current_slot, + )?; if matches!(block.execution_status, ExecutionStatus::Valid(_)) { self.propagate_execution_payload_validation_by_index(parent_index)?; @@ -491,9 +507,6 @@ impl ProtoArray { node.best_descendant = None } - // It might be new knowledge that this block is valid, ensure that it and all - // ancestors are marked as valid. - self.propagate_execution_payload_validation_by_index(index)?; break; } } @@ -607,7 +620,11 @@ impl ProtoArray { /// been called without a subsequent `Self::apply_score_changes` call. This is because /// `on_new_block` does not attempt to walk backwards through the tree and update the /// best-child/best-descendant links. 
- pub fn find_head(&self, justified_root: &Hash256) -> Result<Hash256, Error> { + pub fn find_head<E: EthSpec>( + &self, + justified_root: &Hash256, + current_slot: Slot, + ) -> Result<Hash256, Error> { let justified_index = self .indices .get(justified_root) @@ -640,8 +657,9 @@ impl ProtoArray { .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; // Perform a sanity check that the node is indeed valid to be the head. - if !self.node_is_viable_for_head(best_node) { + if !self.node_is_viable_for_head::<E>(best_node, current_slot) { return Err(Error::InvalidBestNode(Box::new(InvalidBestNodeInfo { + current_slot, start_root: *justified_root, justified_checkpoint: self.justified_checkpoint, finalized_checkpoint: self.finalized_checkpoint, @@ -736,10 +754,11 @@ impl ProtoArray { /// best-descendant. /// - The child is not the best child but becomes the best child. /// - The child is not the best child and does not become the best child. - fn maybe_update_best_child_and_descendant( + fn maybe_update_best_child_and_descendant<E: EthSpec>( &mut self, parent_index: usize, child_index: usize, + current_slot: Slot, ) -> Result<(), Error> { let child = self .nodes @@ -751,7 +770,8 @@ impl ProtoArray { .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - let child_leads_to_viable_head = self.node_leads_to_viable_head(child)?; + let child_leads_to_viable_head = + self.node_leads_to_viable_head::<E>(child, current_slot)?; // These three variables are aliases to the three options that we may set the // `parent.best_child` and `parent.best_descendant` to. @@ -764,54 +784,54 @@ impl ProtoArray { ); let no_change = (parent.best_child, parent.best_descendant); - let (new_best_child, new_best_descendant) = if let Some(best_child_index) = - parent.best_child - { - if best_child_index == child_index && !child_leads_to_viable_head { - // If the child is already the best-child of the parent but it's not viable for - // the head, remove it. 
- change_to_none - } else if best_child_index == child_index { - // If the child is the best-child already, set it again to ensure that the - // best-descendant of the parent is updated. - change_to_child - } else { - let best_child = self - .nodes - .get(best_child_index) - .ok_or(Error::InvalidBestDescendant(best_child_index))?; - - let best_child_leads_to_viable_head = self.node_leads_to_viable_head(best_child)?; - - if child_leads_to_viable_head && !best_child_leads_to_viable_head { - // The child leads to a viable head, but the current best-child doesn't. + let (new_best_child, new_best_descendant) = + if let Some(best_child_index) = parent.best_child { + if best_child_index == child_index && !child_leads_to_viable_head { + // If the child is already the best-child of the parent but it's not viable for + // the head, remove it. + change_to_none + } else if best_child_index == child_index { + // If the child is the best-child already, set it again to ensure that the + // best-descendant of the parent is updated. change_to_child - } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { - // The best child leads to a viable head, but the child doesn't. - no_change - } else if child.weight == best_child.weight { - // Tie-breaker of equal weights by root. - if child.root >= best_child.root { - change_to_child - } else { - no_change - } } else { - // Choose the winner by weight. - if child.weight >= best_child.weight { + let best_child = self + .nodes + .get(best_child_index) + .ok_or(Error::InvalidBestDescendant(best_child_index))?; + + let best_child_leads_to_viable_head = + self.node_leads_to_viable_head::<E>(best_child, current_slot)?; + + if child_leads_to_viable_head && !best_child_leads_to_viable_head { + // The child leads to a viable head, but the current best-child doesn't. 
change_to_child - } else { + } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { + // The best child leads to a viable head, but the child doesn't. no_change + } else if child.weight == best_child.weight { + // Tie-breaker of equal weights by root. + if child.root >= best_child.root { + change_to_child + } else { + no_change + } + } else { + // Choose the winner by weight. + if child.weight >= best_child.weight { + change_to_child + } else { + no_change + } } } - } - } else if child_leads_to_viable_head { - // There is no current best-child and the child is viable. - change_to_child - } else { - // There is no current best-child but the child is not viable. - no_change - }; + } else if child_leads_to_viable_head { + // There is no current best-child and the child is viable. + change_to_child + } else { + // There is no current best-child but the child is not viable. + no_change + }; let parent = self .nodes @@ -826,7 +846,11 @@ impl ProtoArray { /// Indicates if the node itself is viable for the head, or if it's best descendant is viable /// for the head. 
- fn node_leads_to_viable_head(&self, node: &ProtoNode) -> Result<bool, Error> { + fn node_leads_to_viable_head<E: EthSpec>( + &self, + node: &ProtoNode, + current_slot: Slot, + ) -> Result<bool, Error> { let best_descendant_is_viable_for_head = if let Some(best_descendant_index) = node.best_descendant { let best_descendant = self @@ -834,12 +858,13 @@ impl ProtoArray { .get(best_descendant_index) .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; - self.node_is_viable_for_head(best_descendant) + self.node_is_viable_for_head::<E>(best_descendant, current_slot) } else { false }; - Ok(best_descendant_is_viable_for_head || self.node_is_viable_for_head(node)) + Ok(best_descendant_is_viable_for_head + || self.node_is_viable_for_head::<E>(node, current_slot)) } /// This is the equivalent to the `filter_block_tree` function in the eth2 spec: @@ -848,18 +873,43 @@ impl ProtoArray { /// /// Any node that has a different finalized or justified epoch should not be viable for the /// head. 
- fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool { + fn node_is_viable_for_head<E: EthSpec>(&self, node: &ProtoNode, current_slot: Slot) -> bool { if node.execution_status.is_invalid() { return false; } - if let (Some(node_justified_checkpoint), Some(node_finalized_checkpoint)) = + let checkpoint_match_predicate = + |node_justified_checkpoint: Checkpoint, node_finalized_checkpoint: Checkpoint| { + let correct_justified = node_justified_checkpoint == self.justified_checkpoint + || self.justified_checkpoint.epoch == Epoch::new(0); + let correct_finalized = node_finalized_checkpoint == self.finalized_checkpoint + || self.finalized_checkpoint.epoch == Epoch::new(0); + correct_justified && correct_finalized + }; + + if let ( + Some(unrealized_justified_checkpoint), + Some(unrealized_finalized_checkpoint), + Some(justified_checkpoint), + Some(finalized_checkpoint), + ) = ( + node.unrealized_justified_checkpoint, + node.unrealized_finalized_checkpoint, + node.justified_checkpoint, + node.finalized_checkpoint, + ) { + if node.slot.epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { + checkpoint_match_predicate( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + ) + } else { + checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) + } + } else if let (Some(justified_checkpoint), Some(finalized_checkpoint)) = (node.justified_checkpoint, node.finalized_checkpoint) { - (node_justified_checkpoint == self.justified_checkpoint - || self.justified_checkpoint.epoch == Epoch::new(0)) - && (node_finalized_checkpoint == self.finalized_checkpoint - || self.finalized_checkpoint.epoch == Epoch::new(0)) + checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) } else { false } @@ -931,7 +981,7 @@ impl ProtoArray { /// Returns `None` if there is an overflow or underflow when calculating the score. 
/// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance -fn calculate_proposer_boost<E: EthSpec>( +pub fn calculate_proposer_boost<E: EthSpec>( validator_balances: &[u64], proposer_score_boost: u64, ) -> Option<u64> { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 88bf7840c2..306c986018 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,10 +1,12 @@ use crate::error::Error; -use crate::proto_array::{InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode}; +use crate::proto_array::{ + calculate_proposer_boost, InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode, +}; use crate::ssz_container::SszContainer; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; +use std::collections::{BTreeSet, HashMap}; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -87,10 +89,22 @@ impl ExecutionStatus { /// /// - Has execution enabled, AND /// - Has a payload that has not yet been verified by an EL. - pub fn is_optimistic(&self) -> bool { + pub fn is_strictly_optimistic(&self) -> bool { matches!(self, ExecutionStatus::Optimistic(_)) } + /// Returns `true` if the block: + /// + /// - Has execution enabled, AND + /// - Has a payload that has not yet been verified by an EL, OR. + /// - Has a payload that has been deemed invalid by an EL. + pub fn is_optimistic_or_invalid(&self) -> bool { + matches!( + self, + ExecutionStatus::Optimistic(_) | ExecutionStatus::Invalid(_) + ) + } + /// Returns `true` if the block: /// /// - Has execution enabled, AND @@ -124,6 +138,8 @@ pub struct Block { /// Indicates if an execution node has marked this block as valid. Also contains the execution /// block hash. 
pub execution_status: ExecutionStatus, + pub unrealized_justified_checkpoint: Option<Checkpoint>, + pub unrealized_finalized_checkpoint: Option<Checkpoint>, } /// A Vec-wrapper which will grow to match any request. @@ -162,7 +178,7 @@ pub struct ProtoArrayForkChoice { impl ProtoArrayForkChoice { #[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new<E: EthSpec>( finalized_block_slot: Slot, finalized_block_state_root: Hash256, justified_checkpoint: Checkpoint, @@ -193,10 +209,12 @@ impl ProtoArrayForkChoice { justified_checkpoint, finalized_checkpoint, execution_status, + unrealized_justified_checkpoint: Some(justified_checkpoint), + unrealized_finalized_checkpoint: Some(finalized_checkpoint), }; proto_array - .on_block(block) + .on_block::<E>(block, finalized_block_slot) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; Ok(Self { @@ -242,22 +260,29 @@ impl ProtoArrayForkChoice { Ok(()) } - pub fn process_block(&mut self, block: Block) -> Result<(), String> { + pub fn process_block<E: EthSpec>( + &mut self, + block: Block, + current_slot: Slot, + ) -> Result<(), String> { if block.parent_root.is_none() { return Err("Missing parent root".to_string()); } self.proto_array - .on_block(block) + .on_block::<E>(block, current_slot) .map_err(|e| format!("process_block_error: {:?}", e)) } + #[allow(clippy::too_many_arguments)] pub fn find_head<E: EthSpec>( &mut self, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, justified_state_balances: &[u64], proposer_boost_root: Hash256, + equivocating_indices: &BTreeSet<u64>, + current_slot: Slot, spec: &ChainSpec, ) -> Result<Hash256, String> { let old_balances = &mut self.balances; @@ -269,6 +294,7 @@ impl ProtoArrayForkChoice { &mut self.votes, old_balances, new_balances, + equivocating_indices, ) .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?; @@ -279,6 +305,7 @@ impl ProtoArrayForkChoice { finalized_checkpoint, new_balances, proposer_boost_root, + 
current_slot, spec, ) .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?; @@ -286,10 +313,110 @@ impl ProtoArrayForkChoice { *old_balances = new_balances.to_vec(); self.proto_array - .find_head(&justified_checkpoint.root) + .find_head::<E>(&justified_checkpoint.root, current_slot) .map_err(|e| format!("find_head failed: {:?}", e)) } + /// For all nodes, regardless of their relationship to the finalized block, set their execution + /// status to be optimistic. + /// + /// In practice this means forgetting any `VALID` or `INVALID` statuses. + pub fn set_all_blocks_to_optimistic<E: EthSpec>( + &mut self, + spec: &ChainSpec, + ) -> Result<(), String> { + // Iterate backwards through all nodes in the `proto_array`. Whilst it's not strictly + // required to do this process in reverse, it seems natural when we consider how LMD votes + // are counted. + // + // This function will touch all blocks, even those that do not descend from the finalized + // block. Since this function is expected to run at start-up during very rare + // circumstances we prefer simplicity over efficiency. + for node_index in (0..self.proto_array.nodes.len()).rev() { + let node = self + .proto_array + .nodes + .get_mut(node_index) + .ok_or("unreachable index out of bounds in proto_array nodes")?; + + match node.execution_status { + ExecutionStatus::Invalid(block_hash) => { + node.execution_status = ExecutionStatus::Optimistic(block_hash); + + // Restore the weight of the node, it would have been set to `0` in + // `apply_score_changes` when it was invalidated. + let mut restored_weight: u64 = self + .votes + .0 + .iter() + .enumerate() + .filter_map(|(validator_index, vote)| { + if vote.current_root == node.root { + // Any voting validator that does not have a balance should be + // ignored. This is consistent with `compute_deltas`. 
+ self.balances.get(validator_index) + } else { + None + } + }) + .sum(); + + // If the invalid root was boosted, apply the weight to it and + // ancestors. + if let Some(proposer_score_boost) = spec.proposer_score_boost { + if self.proto_array.previous_proposer_boost.root == node.root { + // Compute the score based upon the current balances. We can't rely on + // the `previous_proposr_boost.score` since it is set to zero with an + // invalid node. + let proposer_score = + calculate_proposer_boost::<E>(&self.balances, proposer_score_boost) + .ok_or("Failed to compute proposer boost")?; + // Store the score we've applied here so it can be removed in + // a later call to `apply_score_changes`. + self.proto_array.previous_proposer_boost.score = proposer_score; + // Apply this boost to this node. + restored_weight = restored_weight + .checked_add(proposer_score) + .ok_or("Overflow when adding boost to weight")?; + } + } + + // Add the restored weight to the node and all ancestors. + if restored_weight > 0 { + let mut node_or_ancestor = node; + loop { + node_or_ancestor.weight = node_or_ancestor + .weight + .checked_add(restored_weight) + .ok_or("Overflow when adding weight to ancestor")?; + + if let Some(parent_index) = node_or_ancestor.parent { + node_or_ancestor = self + .proto_array + .nodes + .get_mut(parent_index) + .ok_or(format!("Missing parent index: {}", parent_index))?; + } else { + // This is either the finalized block or a block that does not + // descend from the finalized block. + break; + } + } + } + } + // There are no balance changes required if the node was either valid or + // optimistic. + ExecutionStatus::Valid(block_hash) | ExecutionStatus::Optimistic(block_hash) => { + node.execution_status = ExecutionStatus::Optimistic(block_hash) + } + // An irrelevant node cannot become optimistic, this is a no-op. 
+ ExecutionStatus::Irrelevant(_) => (), + } + } + + Ok(()) + } + pub fn maybe_prune(&mut self, finalized_root: Hash256) -> Result<(), String> { self.proto_array .maybe_prune(finalized_root) @@ -341,6 +468,8 @@ impl ProtoArrayForkChoice { justified_checkpoint, finalized_checkpoint, execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, }) } else { None @@ -427,6 +556,7 @@ fn compute_deltas( votes: &mut ElasticList<VoteTracker>, old_balances: &[u64], new_balances: &[u64], + equivocating_indices: &BTreeSet<u64>, ) -> Result<Vec<i64>, Error> { let mut deltas = vec![0_i64; indices.len()]; @@ -437,6 +567,38 @@ fn compute_deltas( continue; } + // Handle newly slashed validators by deducting their weight from their current vote. We + // determine if they are newly slashed by checking whether their `vote.current_root` is + // non-zero. After applying the deduction a single time we set their `current_root` to zero + // and never update it again (thus preventing repeat deductions). + // + // Even if they make new attestations which are processed by `process_attestation` these + // will only update their `vote.next_root`. + if equivocating_indices.contains(&(val_index as u64)) { + // First time we've processed this slashing in fork choice: + // + // 1. Add a negative delta for their `current_root`. + // 2. Set their `current_root` (permanently) to zero. + if !vote.current_root.is_zero() { + let old_balance = old_balances.get(val_index).copied().unwrap_or(0); + + if let Some(current_delta_index) = indices.get(&vote.current_root).copied() { + let delta = deltas + .get(current_delta_index) + .ok_or(Error::InvalidNodeDelta(current_delta_index))? + .checked_sub(old_balance as i64) + .ok_or(Error::DeltaOverflow(current_delta_index))?; + + // Array access safe due to check on previous line. 
+ deltas[current_delta_index] = delta; + } + + vote.current_root = Hash256::zero(); + } + // We've handled this slashed validator, continue without applying an ordinary delta. + continue; + } + // If the validator was not included in the _old_ balances (i.e., it did not exist yet) // then say its balance was zero. let old_balance = old_balances.get(val_index).copied().unwrap_or(0); @@ -485,6 +647,7 @@ fn compute_deltas( #[cfg(test)] mod test_compute_deltas { use super::*; + use types::MainnetEthSpec; /// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. fn hash_from_index(i: usize) -> Hash256 { @@ -510,7 +673,7 @@ mod test_compute_deltas { root: finalized_root, }; - let mut fc = ProtoArrayForkChoice::new( + let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( genesis_slot, state_root, genesis_checkpoint, @@ -523,34 +686,44 @@ mod test_compute_deltas { // Add block that is a finalized descendant. fc.proto_array - .on_block(Block { - slot: genesis_slot + 1, - root: finalized_desc, - parent_root: Some(finalized_root), - state_root, - target_root: finalized_root, - current_epoch_shuffling_id: junk_shuffling_id.clone(), - next_epoch_shuffling_id: junk_shuffling_id.clone(), - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, - execution_status, - }) + .on_block::<MainnetEthSpec>( + Block { + slot: genesis_slot + 1, + root: finalized_desc, + parent_root: Some(finalized_root), + state_root, + target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: Some(genesis_checkpoint), + unrealized_finalized_checkpoint: Some(genesis_checkpoint), + }, + genesis_slot + 1, + ) .unwrap(); // Add block that is *not* a finalized descendant. 
fc.proto_array - .on_block(Block { - slot: genesis_slot + 1, - root: not_finalized_desc, - parent_root: None, - state_root, - target_root: finalized_root, - current_epoch_shuffling_id: junk_shuffling_id.clone(), - next_epoch_shuffling_id: junk_shuffling_id, - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, - execution_status, - }) + .on_block::<MainnetEthSpec>( + Block { + slot: genesis_slot + 1, + root: not_finalized_desc, + parent_root: None, + state_root, + target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id, + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, + }, + genesis_slot + 1, + ) .unwrap(); assert!(!fc.is_descendant(unknown, unknown)); @@ -582,6 +755,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -594,8 +768,14 @@ mod test_compute_deltas { new_balances.push(0); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -626,6 +806,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -638,8 +819,14 @@ mod test_compute_deltas { new_balances.push(BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute 
deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -677,6 +864,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -689,8 +877,14 @@ mod test_compute_deltas { new_balances.push(BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -723,6 +917,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -735,8 +930,14 @@ mod test_compute_deltas { new_balances.push(BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -774,6 +975,7 @@ mod test_compute_deltas { let mut indices = HashMap::new(); let mut votes = ElasticList::default(); + let equivocating_indices = BTreeSet::new(); // There is only one block. 
indices.insert(hash_from_index(1), 0); @@ -796,8 +998,14 @@ mod test_compute_deltas { next_epoch: Epoch::new(0), }); - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!(deltas.len(), 1, "deltas should have expected length"); @@ -826,6 +1034,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -838,8 +1047,14 @@ mod test_compute_deltas { new_balances.push(NEW_BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -879,6 +1094,7 @@ mod test_compute_deltas { let mut indices = HashMap::new(); let mut votes = ElasticList::default(); + let equivocating_indices = BTreeSet::new(); // There are two blocks. indices.insert(hash_from_index(1), 0); @@ -898,8 +1114,14 @@ mod test_compute_deltas { }); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!(deltas.len(), 2, "deltas should have expected length"); @@ -928,6 +1150,7 @@ mod test_compute_deltas { let mut indices = HashMap::new(); let mut votes = ElasticList::default(); + let equivocating_indices = BTreeSet::new(); // There are two blocks. 
indices.insert(hash_from_index(1), 0); @@ -947,8 +1170,14 @@ mod test_compute_deltas { }); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!(deltas.len(), 2, "deltas should have expected length"); @@ -969,4 +1198,72 @@ mod test_compute_deltas { ); } } + + #[test] + fn validator_equivocates() { + const OLD_BALANCE: u64 = 42; + const NEW_BALANCE: u64 = 43; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + + // There are two blocks. + indices.insert(hash_from_index(1), 0); + indices.insert(hash_from_index(2), 1); + + // There are two validators. + let old_balances = vec![OLD_BALANCE; 2]; + let new_balances = vec![NEW_BALANCE; 2]; + + // Both validator move votes from block 1 to block 2. + for _ in 0..2 { + votes.0.push(VoteTracker { + current_root: hash_from_index(1), + next_root: hash_from_index(2), + next_epoch: Epoch::new(0), + }); + } + + // Validator 0 is slashed. + let equivocating_indices = BTreeSet::from_iter([0]); + + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); + + assert_eq!(deltas.len(), 2, "deltas should have expected length"); + + assert_eq!( + deltas[0], + -2 * OLD_BALANCE as i64, + "block 1 should have lost two old balances" + ); + assert_eq!( + deltas[1], NEW_BALANCE as i64, + "block 2 should have gained one balance" + ); + + // Validator 0's current root should have been reset. + assert_eq!(votes.0[0].current_root, Hash256::zero()); + assert_eq!(votes.0[0].next_root, hash_from_index(2)); + + // Validator 1's current root should have been updated. 
+ assert_eq!(votes.0[1].current_root, hash_from_index(2)); + + // Re-computing the deltas should be a no-op (no repeat deduction for the slashed validator). + let deltas = compute_deltas( + &indices, + &mut votes, + &new_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); + assert_eq!(deltas, vec![0, 0]); + } } diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 81e2bbe963..92b5966c9a 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -6,6 +6,7 @@ pub mod hex_vec; pub mod json_str; pub mod list_of_bytes_lists; pub mod quoted_u64_vec; +pub mod u256_hex_be; pub mod u32_hex; pub mod u64_hex_be; pub mod u8_hex; diff --git a/consensus/serde_utils/src/u256_hex_be.rs b/consensus/serde_utils/src/u256_hex_be.rs new file mode 100644 index 0000000000..8007e5792c --- /dev/null +++ b/consensus/serde_utils/src/u256_hex_be.rs @@ -0,0 +1,144 @@ +use ethereum_types::U256; + +use serde::de::Visitor; +use serde::{de, Deserializer, Serialize, Serializer}; +use std::fmt; +use std::str::FromStr; + +pub fn serialize<S>(num: &U256, serializer: S) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + num.serialize(serializer) +} + +pub struct U256Visitor; + +impl<'de> Visitor<'de> for U256Visitor { + type Value = String; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a well formatted hex string") + } + + fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> + where + E: de::Error, + { + if !value.starts_with("0x") { + return Err(de::Error::custom("must start with 0x")); + } + let stripped = &value[2..]; + if stripped.is_empty() { + Err(de::Error::custom(format!( + "quantity cannot be {:?}", + stripped + ))) + } else if stripped == "0" { + Ok(value.to_string()) + } else if stripped.starts_with('0') { + Err(de::Error::custom("cannot have leading zero")) + } else { + Ok(value.to_string()) + } + } +} + +pub fn deserialize<'de, 
D>(deserializer: D) -> Result<U256, D::Error> +where + D: Deserializer<'de>, +{ + let decoded = deserializer.deserialize_string(U256Visitor)?; + + U256::from_str(&decoded).map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e))) +} + +#[cfg(test)] +mod test { + use ethereum_types::U256; + use serde::{Deserialize, Serialize}; + use serde_json; + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + #[serde(transparent)] + struct Wrapper { + #[serde(with = "super")] + val: U256, + } + + #[test] + fn encoding() { + assert_eq!( + &serde_json::to_string(&Wrapper { val: 0.into() }).unwrap(), + "\"0x0\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 1.into() }).unwrap(), + "\"0x1\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 256.into() }).unwrap(), + "\"0x100\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 65.into() }).unwrap(), + "\"0x41\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 1024.into() }).unwrap(), + "\"0x400\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: U256::max_value() - 1 + }) + .unwrap(), + "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: U256::max_value() + }) + .unwrap(), + "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ); + } + + #[test] + fn decoding() { + assert_eq!( + serde_json::from_str::<Wrapper>("\"0x0\"").unwrap(), + Wrapper { val: 0.into() }, + ); + assert_eq!( + serde_json::from_str::<Wrapper>("\"0x41\"").unwrap(), + Wrapper { val: 65.into() }, + ); + assert_eq!( + serde_json::from_str::<Wrapper>("\"0x400\"").unwrap(), + Wrapper { val: 1024.into() }, + ); + assert_eq!( + serde_json::from_str::<Wrapper>( + "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" + ) + .unwrap(), + Wrapper { + val: U256::max_value() - 1 + }, + ); + assert_eq!( + serde_json::from_str::<Wrapper>( + 
"\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ) + .unwrap(), + Wrapper { + val: U256::max_value() + }, + ); + serde_json::from_str::<Wrapper>("\"0x\"").unwrap_err(); + serde_json::from_str::<Wrapper>("\"0x0400\"").unwrap_err(); + serde_json::from_str::<Wrapper>("\"400\"").unwrap_err(); + serde_json::from_str::<Wrapper>("\"ff\"").unwrap_err(); + } +} diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml index 7ba3e0678c..a153c2efc1 100644 --- a/consensus/ssz/Cargo.toml +++ b/consensus/ssz/Cargo.toml @@ -14,7 +14,8 @@ eth2_ssz_derive = "0.3.0" [dependencies] ethereum-types = "0.12.1" -smallvec = "1.6.1" +smallvec = { version = "1.6.1", features = ["const_generics"] } +itertools = "0.10.3" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/ssz/src/decode.rs b/consensus/ssz/src/decode.rs index 604cc68d7b..10b3573b16 100644 --- a/consensus/ssz/src/decode.rs +++ b/consensus/ssz/src/decode.rs @@ -5,6 +5,7 @@ use std::cmp::Ordering; type SmallVec8<T> = SmallVec<[T; 8]>; pub mod impls; +pub mod try_from_iter; /// Returned when SSZ decoding fails. #[derive(Debug, PartialEq, Clone)] diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index 0e6b390830..d91ddabe02 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -1,7 +1,11 @@ use super::*; +use crate::decode::try_from_iter::{TryCollect, TryFromIter}; use core::num::NonZeroUsize; use ethereum_types::{H160, H256, U128, U256}; +use itertools::process_results; use smallvec::SmallVec; +use std::collections::{BTreeMap, BTreeSet}; +use std::iter::{self, FromIterator}; use std::sync::Arc; macro_rules! impl_decodable_for_uint { @@ -380,14 +384,14 @@ macro_rules! 
impl_for_vec { fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { if bytes.is_empty() { - Ok(vec![].into()) + Ok(Self::from_iter(iter::empty())) } else if T::is_ssz_fixed_len() { bytes .chunks(T::ssz_fixed_len()) - .map(|chunk| T::from_ssz_bytes(chunk)) + .map(T::from_ssz_bytes) .collect() } else { - decode_list_of_variable_length_items(bytes, $max_len).map(|vec| vec.into()) + decode_list_of_variable_length_items(bytes, $max_len) } } } @@ -395,26 +399,73 @@ macro_rules! impl_for_vec { } impl_for_vec!(Vec<T>, None); -impl_for_vec!(SmallVec<[T; 1]>, Some(1)); -impl_for_vec!(SmallVec<[T; 2]>, Some(2)); -impl_for_vec!(SmallVec<[T; 3]>, Some(3)); -impl_for_vec!(SmallVec<[T; 4]>, Some(4)); -impl_for_vec!(SmallVec<[T; 5]>, Some(5)); -impl_for_vec!(SmallVec<[T; 6]>, Some(6)); -impl_for_vec!(SmallVec<[T; 7]>, Some(7)); -impl_for_vec!(SmallVec<[T; 8]>, Some(8)); +impl_for_vec!(SmallVec<[T; 1]>, None); +impl_for_vec!(SmallVec<[T; 2]>, None); +impl_for_vec!(SmallVec<[T; 3]>, None); +impl_for_vec!(SmallVec<[T; 4]>, None); +impl_for_vec!(SmallVec<[T; 5]>, None); +impl_for_vec!(SmallVec<[T; 6]>, None); +impl_for_vec!(SmallVec<[T; 7]>, None); +impl_for_vec!(SmallVec<[T; 8]>, None); + +impl<K, V> Decode for BTreeMap<K, V> +where + K: Decode + Ord, + V: Decode, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { + if bytes.is_empty() { + Ok(Self::from_iter(iter::empty())) + } else if <(K, V)>::is_ssz_fixed_len() { + bytes + .chunks(<(K, V)>::ssz_fixed_len()) + .map(<(K, V)>::from_ssz_bytes) + .collect() + } else { + decode_list_of_variable_length_items(bytes, None) + } + } +} + +impl<T> Decode for BTreeSet<T> +where + T: Decode + Ord, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { + if bytes.is_empty() { + Ok(Self::from_iter(iter::empty())) + } else if T::is_ssz_fixed_len() { + bytes + .chunks(T::ssz_fixed_len()) + .map(T::from_ssz_bytes) + 
.collect() + } else { + decode_list_of_variable_length_items(bytes, None) + } + } +} /// Decodes `bytes` as if it were a list of variable-length items. /// -/// The `ssz::SszDecoder` can also perform this functionality, however it it significantly faster -/// as it is optimized to read same-typed items whilst `ssz::SszDecoder` supports reading items of -/// differing types. -pub fn decode_list_of_variable_length_items<T: Decode>( +/// The `ssz::SszDecoder` can also perform this functionality, however this function is +/// significantly faster as it is optimized to read same-typed items whilst `ssz::SszDecoder` +/// supports reading items of differing types. +pub fn decode_list_of_variable_length_items<T: Decode, Container: TryFromIter<T>>( bytes: &[u8], max_len: Option<usize>, -) -> Result<Vec<T>, DecodeError> { +) -> Result<Container, DecodeError> { if bytes.is_empty() { - return Ok(vec![]); + return Container::try_from_iter(iter::empty()).map_err(|e| { + DecodeError::BytesInvalid(format!("Error trying to collect empty list: {:?}", e)) + }); } let first_offset = read_offset(bytes)?; @@ -433,35 +484,27 @@ pub fn decode_list_of_variable_length_items<T: Decode>( ))); } - // Only initialize the vec with a capacity if a maximum length is provided. - // - // We assume that if a max length is provided then the application is able to handle an - // allocation of this size. - let mut values = if max_len.is_some() { - Vec::with_capacity(num_items) - } else { - vec![] - }; - let mut offset = first_offset; - for i in 1..=num_items { - let slice_option = if i == num_items { - bytes.get(offset..) - } else { - let start = offset; + process_results( + (1..=num_items).map(|i| { + let slice_option = if i == num_items { + bytes.get(offset..) 
+ } else { + let start = offset; - let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; - offset = sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; + let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; + offset = + sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; - bytes.get(start..offset) - }; + bytes.get(start..offset) + }; - let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?; - - values.push(T::from_ssz_bytes(slice)?); - } - - Ok(values) + let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?; + T::from_ssz_bytes(slice) + }), + |iter| iter.try_collect(), + )? + .map_err(|e| DecodeError::BytesInvalid(format!("Error collecting into container: {:?}", e))) } #[cfg(test)] diff --git a/consensus/ssz/src/decode/try_from_iter.rs b/consensus/ssz/src/decode/try_from_iter.rs new file mode 100644 index 0000000000..22db02d4fc --- /dev/null +++ b/consensus/ssz/src/decode/try_from_iter.rs @@ -0,0 +1,96 @@ +use smallvec::SmallVec; +use std::collections::{BTreeMap, BTreeSet}; +use std::convert::Infallible; +use std::fmt::Debug; + +/// Partial variant of `std::iter::FromIterator`. +/// +/// This trait is implemented for types which can be constructed from an iterator of decoded SSZ +/// values, but which may refuse values once a length limit is reached. +pub trait TryFromIter<T>: Sized { + type Error: Debug; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = T>; +} + +// It would be nice to be able to do a blanket impl, e.g. +// +// `impl TryFromIter<T> for C where C: FromIterator<T>` +// +// However this runs into trait coherence issues due to the type parameter `T` on `TryFromIter`. +// +// E.g. 
If we added an impl downstream for `List<T, N>` then another crate downstream of that +// could legally add an impl of `FromIterator<Local> for List<Local, N>` which would create +// two conflicting implementations for `List<Local, N>`. Hence the `List<T, N>` impl is disallowed +// by the compiler in the presence of the blanket impl. That's obviously annoying, so we opt to +// abandon the blanket impl in favour of impls for selected types. +impl<T> TryFromIter<T> for Vec<T> { + type Error = Infallible; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = T>, + { + Ok(Self::from_iter(iter)) + } +} + +impl<T, const N: usize> TryFromIter<T> for SmallVec<[T; N]> { + type Error = Infallible; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = T>, + { + Ok(Self::from_iter(iter)) + } +} + +impl<K, V> TryFromIter<(K, V)> for BTreeMap<K, V> +where + K: Ord, +{ + type Error = Infallible; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = (K, V)>, + { + Ok(Self::from_iter(iter)) + } +} + +impl<T> TryFromIter<T> for BTreeSet<T> +where + T: Ord, +{ + type Error = Infallible; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = T>, + { + Ok(Self::from_iter(iter)) + } +} + +/// Partial variant of `collect`. 
+pub trait TryCollect: Iterator { + fn try_collect<C>(self) -> Result<C, C::Error> + where + C: TryFromIter<Self::Item>; +} + +impl<I> TryCollect for I +where + I: Iterator, +{ + fn try_collect<C>(self) -> Result<C, C::Error> + where + C: TryFromIter<Self::Item>, + { + C::try_from_iter(self) + } +} diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index 5728685d01..cfd95ba40d 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -2,6 +2,7 @@ use super::*; use core::num::NonZeroUsize; use ethereum_types::{H160, H256, U128, U256}; use smallvec::SmallVec; +use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; macro_rules! impl_encodable_for_uint { @@ -220,6 +221,65 @@ impl<T: Encode> Encode for Arc<T> { } } +// Encode transparently through references. +impl<'a, T: Encode> Encode for &'a T { + fn is_ssz_fixed_len() -> bool { + T::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + T::ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + T::ssz_append(self, buf) + } + + fn ssz_bytes_len(&self) -> usize { + T::ssz_bytes_len(self) + } +} + +/// Compute the encoded length of a vector-like sequence of `T`. +pub fn sequence_ssz_bytes_len<I, T>(iter: I) -> usize +where + I: Iterator<Item = T> + ExactSizeIterator, + T: Encode, +{ + // Compute length before doing any iteration. + let length = iter.len(); + if <T as Encode>::is_ssz_fixed_len() { + <T as Encode>::ssz_fixed_len() * length + } else { + let mut len = iter.map(|item| item.ssz_bytes_len()).sum(); + len += BYTES_PER_LENGTH_OFFSET * length; + len + } +} + +/// Encode a vector-like sequence of `T`. 
+pub fn sequence_ssz_append<I, T>(iter: I, buf: &mut Vec<u8>) +where + I: Iterator<Item = T> + ExactSizeIterator, + T: Encode, +{ + if T::is_ssz_fixed_len() { + buf.reserve(T::ssz_fixed_len() * iter.len()); + + for item in iter { + item.ssz_append(buf); + } + } else { + let mut encoder = SszEncoder::container(buf, iter.len() * BYTES_PER_LENGTH_OFFSET); + + for item in iter { + encoder.append(&item); + } + + encoder.finalize(); + } +} + macro_rules! impl_for_vec { ($type: ty) => { impl<T: Encode> Encode for $type { @@ -228,32 +288,11 @@ macro_rules! impl_for_vec { } fn ssz_bytes_len(&self) -> usize { - if <T as Encode>::is_ssz_fixed_len() { - <T as Encode>::ssz_fixed_len() * self.len() - } else { - let mut len = self.iter().map(|item| item.ssz_bytes_len()).sum(); - len += BYTES_PER_LENGTH_OFFSET * self.len(); - len - } + sequence_ssz_bytes_len(self.iter()) } fn ssz_append(&self, buf: &mut Vec<u8>) { - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * self.len()); - - for item in self { - item.ssz_append(buf); - } - } else { - let mut encoder = - SszEncoder::container(buf, self.len() * BYTES_PER_LENGTH_OFFSET); - - for item in self { - encoder.append(item); - } - - encoder.finalize(); - } + sequence_ssz_append(self.iter(), buf) } } }; @@ -269,6 +308,41 @@ impl_for_vec!(SmallVec<[T; 6]>); impl_for_vec!(SmallVec<[T; 7]>); impl_for_vec!(SmallVec<[T; 8]>); +impl<K, V> Encode for BTreeMap<K, V> +where + K: Encode + Ord, + V: Encode, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn ssz_bytes_len(&self) -> usize { + sequence_ssz_bytes_len(self.iter()) + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + sequence_ssz_append(self.iter(), buf) + } +} + +impl<T> Encode for BTreeSet<T> +where + T: Encode + Ord, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn ssz_bytes_len(&self) -> usize { + sequence_ssz_bytes_len(self.iter()) + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + sequence_ssz_append(self.iter(), buf) + } +} + impl Encode for bool { fn 
is_ssz_fixed_len() -> bool { true diff --git a/consensus/ssz/src/lib.rs b/consensus/ssz/src/lib.rs index df00c514e2..e71157a3ee 100644 --- a/consensus/ssz/src/lib.rs +++ b/consensus/ssz/src/lib.rs @@ -40,8 +40,8 @@ pub mod legacy; mod union_selector; pub use decode::{ - impls::decode_list_of_variable_length_items, read_offset, split_union_bytes, Decode, - DecodeError, SszDecoder, SszDecoderBuilder, + impls::decode_list_of_variable_length_items, read_offset, split_union_bytes, + try_from_iter::TryFromIter, Decode, DecodeError, SszDecoder, SszDecoderBuilder, }; pub use encode::{encode_length, Encode, SszEncoder}; pub use union_selector::UnionSelector; diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs index 7bd6252ad0..e41fc15dd4 100644 --- a/consensus/ssz/tests/tests.rs +++ b/consensus/ssz/tests/tests.rs @@ -4,6 +4,8 @@ use ssz_derive::{Decode, Encode}; mod round_trip { use super::*; + use std::collections::BTreeMap; + use std::iter::FromIterator; fn round_trip<T: Encode + Decode + std::fmt::Debug + PartialEq>(items: Vec<T>) { for item in items { @@ -321,6 +323,52 @@ mod round_trip { round_trip(vec); } + + #[test] + fn btree_map_fixed() { + let data = vec![ + BTreeMap::new(), + BTreeMap::from_iter(vec![(0u8, 0u16), (1, 2), (2, 4), (4, 6)]), + ]; + round_trip(data); + } + + #[test] + fn btree_map_variable_value() { + let data = vec![ + BTreeMap::new(), + BTreeMap::from_iter(vec![ + ( + 0u64, + ThreeVariableLen { + a: 1, + b: vec![3, 5, 7], + c: vec![], + d: vec![0, 0], + }, + ), + ( + 1, + ThreeVariableLen { + a: 99, + b: vec![1], + c: vec![2, 3, 4, 5, 6, 7, 8, 9, 10], + d: vec![4, 5, 6, 7, 8], + }, + ), + ( + 2, + ThreeVariableLen { + a: 0, + b: vec![], + c: vec![], + d: vec![], + }, + ), + ]), + ]; + round_trip(data); + } } mod derive_macro { diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 1414d12c8c..5acf74608a 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ 
b/consensus/ssz_types/src/variable_list.rs @@ -255,7 +255,8 @@ where }) .map(Into::into) } else { - ssz::decode_list_of_variable_length_items(bytes, Some(max_len)).map(|vec| vec.into()) + ssz::decode_list_of_variable_length_items(bytes, Some(max_len)) + .map(|vec: Vec<_>| vec.into()) } } } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 2daefdacad..2a84d1d2d2 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -394,7 +394,7 @@ async fn invalid_attestation_no_committee_for_index() { &spec, ); - // Expecting NoCommitee because we manually set the attestation's index to be invalid + // Expecting NoCommittee because we manually set the attestation's index to be invalid assert_eq!( result, Err(BlockProcessingError::AttestationInvalid { @@ -471,7 +471,7 @@ async fn invalid_attestation_bad_aggregation_bitfield_len() { &spec, ); - // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the commitee size. + // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the committee size. 
assert_eq!( result, Err(BlockProcessingError::BeaconStateError( diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index d813dc42fa..cb90c67b56 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -2,6 +2,7 @@ pub use epoch_processing_summary::EpochProcessingSummary; use errors::EpochProcessingError as Error; +pub use justification_and_finalization_state::JustificationAndFinalizationState; pub use registry_updates::process_registry_updates; use safe_arith::SafeArith; pub use slashings::process_slashings; @@ -14,6 +15,7 @@ pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; pub mod historical_roots_update; +pub mod justification_and_finalization_state; pub mod registry_updates; pub mod resets; pub mod slashings; diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index 1011abe28f..d5df2fc975 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -33,7 +33,9 @@ pub fn process_epoch<T: EthSpec>( let sync_committee = state.current_sync_committee()?.clone(); // Justification and finalization. 
- process_justification_and_finalization(state, &participation_cache)?; + let justification_and_finalization_state = + process_justification_and_finalization(state, &participation_cache)?; + justification_and_finalization_state.apply_changes_to_state(state); process_inactivity_updates(state, &participation_cache, spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs index f47d9c0e68..1f17cf56e0 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs @@ -1,17 +1,21 @@ use super::ParticipationCache; -use crate::per_epoch_processing::weigh_justification_and_finalization; use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::{ + weigh_justification_and_finalization, JustificationAndFinalizationState, +}; use safe_arith::SafeArith; use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; use types::{BeaconState, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. pub fn process_justification_and_finalization<T: EthSpec>( - state: &mut BeaconState<T>, + state: &BeaconState<T>, participation_cache: &ParticipationCache, -) -> Result<(), Error> { +) -> Result<JustificationAndFinalizationState<T>, Error> { + let justification_and_finalization_state = JustificationAndFinalizationState::new(state); + if state.current_epoch() <= T::genesis_epoch().safe_add(1)? 
{ - return Ok(()); + return Ok(justification_and_finalization_state); } let previous_epoch = state.previous_epoch(); @@ -24,7 +28,7 @@ pub fn process_justification_and_finalization<T: EthSpec>( let previous_target_balance = previous_indices.total_balance()?; let current_target_balance = current_indices.total_balance()?; weigh_justification_and_finalization( - state, + justification_and_finalization_state, total_active_balance, previous_target_balance, current_target_balance, diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index 4ae2207ff2..cb7e7d4b30 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -31,7 +31,9 @@ pub fn process_epoch<T: EthSpec>( validator_statuses.process_attestations(state)?; // Justification and finalization. - process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; + let justification_and_finalization_state = + process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; + justification_and_finalization_state.apply_changes_to_state(state); // Rewards and Penalties. 
process_rewards_and_penalties(state, &mut validator_statuses, spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs index 89fb506eec..9792b54507 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs @@ -1,21 +1,25 @@ use crate::per_epoch_processing::base::TotalBalances; -use crate::per_epoch_processing::weigh_justification_and_finalization; use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::{ + weigh_justification_and_finalization, JustificationAndFinalizationState, +}; use safe_arith::SafeArith; use types::{BeaconState, ChainSpec, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. pub fn process_justification_and_finalization<T: EthSpec>( - state: &mut BeaconState<T>, + state: &BeaconState<T>, total_balances: &TotalBalances, _spec: &ChainSpec, -) -> Result<(), Error> { +) -> Result<JustificationAndFinalizationState<T>, Error> { + let justification_and_finalization_state = JustificationAndFinalizationState::new(state); + if state.current_epoch() <= T::genesis_epoch().safe_add(1)? 
{ - return Ok(()); + return Ok(justification_and_finalization_state); } weigh_justification_and_finalization( - state, + justification_and_finalization_state, total_balances.current_epoch(), total_balances.previous_epoch_target_attesters(), total_balances.current_epoch_target_attesters(), diff --git a/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs new file mode 100644 index 0000000000..d8a641f464 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs @@ -0,0 +1,115 @@ +use types::{BeaconState, BeaconStateError, BitVector, Checkpoint, Epoch, EthSpec, Hash256}; + +/// This is a subset of the `BeaconState` which is used to compute justification and finality +/// without modifying the `BeaconState`. +/// +/// A `JustificationAndFinalizationState` can be created from a `BeaconState` to compute +/// justification/finality changes and then applied to a `BeaconState` to enshrine those changes. +#[must_use = "this value must be applied to a state or explicitly dropped"] +pub struct JustificationAndFinalizationState<T: EthSpec> { + /* + * Immutable fields. + */ + previous_epoch: Epoch, + previous_epoch_target_root: Result<Hash256, BeaconStateError>, + current_epoch: Epoch, + current_epoch_target_root: Result<Hash256, BeaconStateError>, + /* + * Mutable fields. 
+ */ + previous_justified_checkpoint: Checkpoint, + current_justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + justification_bits: BitVector<T::JustificationBitsLength>, +} + +impl<T: EthSpec> JustificationAndFinalizationState<T> { + pub fn new(state: &BeaconState<T>) -> Self { + let previous_epoch = state.previous_epoch(); + let current_epoch = state.current_epoch(); + Self { + previous_epoch, + previous_epoch_target_root: state.get_block_root_at_epoch(previous_epoch).copied(), + current_epoch, + current_epoch_target_root: state.get_block_root_at_epoch(current_epoch).copied(), + previous_justified_checkpoint: state.previous_justified_checkpoint(), + current_justified_checkpoint: state.current_justified_checkpoint(), + finalized_checkpoint: state.finalized_checkpoint(), + justification_bits: state.justification_bits().clone(), + } + } + + pub fn apply_changes_to_state(self, state: &mut BeaconState<T>) { + let Self { + /* + * Immutable fields do not need to be used. + */ + previous_epoch: _, + previous_epoch_target_root: _, + current_epoch: _, + current_epoch_target_root: _, + /* + * Mutable fields *must* be used. 
+ */ + previous_justified_checkpoint, + current_justified_checkpoint, + finalized_checkpoint, + justification_bits, + } = self; + + *state.previous_justified_checkpoint_mut() = previous_justified_checkpoint; + *state.current_justified_checkpoint_mut() = current_justified_checkpoint; + *state.finalized_checkpoint_mut() = finalized_checkpoint; + *state.justification_bits_mut() = justification_bits; + } + + pub fn previous_epoch(&self) -> Epoch { + self.previous_epoch + } + + pub fn current_epoch(&self) -> Epoch { + self.current_epoch + } + + pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<Hash256, BeaconStateError> { + if epoch == self.previous_epoch { + self.previous_epoch_target_root.clone() + } else if epoch == self.current_epoch { + self.current_epoch_target_root.clone() + } else { + Err(BeaconStateError::SlotOutOfBounds) + } + } + + pub fn previous_justified_checkpoint(&self) -> Checkpoint { + self.previous_justified_checkpoint + } + + pub fn previous_justified_checkpoint_mut(&mut self) -> &mut Checkpoint { + &mut self.previous_justified_checkpoint + } + + pub fn current_justified_checkpoint_mut(&mut self) -> &mut Checkpoint { + &mut self.current_justified_checkpoint + } + + pub fn current_justified_checkpoint(&self) -> Checkpoint { + self.current_justified_checkpoint + } + + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.finalized_checkpoint + } + + pub fn finalized_checkpoint_mut(&mut self) -> &mut Checkpoint { + &mut self.finalized_checkpoint + } + + pub fn justification_bits(&self) -> &BitVector<T::JustificationBitsLength> { + &self.justification_bits + } + + pub fn justification_bits_mut(&mut self) -> &mut BitVector<T::JustificationBitsLength> { + &mut self.justification_bits + } +} diff --git a/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs index 6e90ee8f37..96f6a8ef14 100644 --- 
a/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs @@ -1,16 +1,16 @@ -use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::{Error, JustificationAndFinalizationState}; use safe_arith::SafeArith; use std::ops::Range; -use types::{BeaconState, Checkpoint, EthSpec}; +use types::{Checkpoint, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. #[allow(clippy::if_same_then_else)] // For readability and consistency with spec. pub fn weigh_justification_and_finalization<T: EthSpec>( - state: &mut BeaconState<T>, + mut state: JustificationAndFinalizationState<T>, total_active_balance: u64, previous_target_balance: u64, current_target_balance: u64, -) -> Result<(), Error> { +) -> Result<JustificationAndFinalizationState<T>, Error> { let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); @@ -24,7 +24,7 @@ pub fn weigh_justification_and_finalization<T: EthSpec>( if previous_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? { *state.current_justified_checkpoint_mut() = Checkpoint { epoch: previous_epoch, - root: *state.get_block_root_at_epoch(previous_epoch)?, + root: state.get_block_root_at_epoch(previous_epoch)?, }; state.justification_bits_mut().set(1, true)?; } @@ -32,7 +32,7 @@ pub fn weigh_justification_and_finalization<T: EthSpec>( if current_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? 
{ *state.current_justified_checkpoint_mut() = Checkpoint { epoch: current_epoch, - root: *state.get_block_root_at_epoch(current_epoch)?, + root: state.get_block_root_at_epoch(current_epoch)?, }; state.justification_bits_mut().set(0, true)?; } @@ -66,5 +66,5 @@ pub fn weigh_justification_and_finalization<T: EthSpec>( *state.finalized_checkpoint_mut() = old_current_justified_checkpoint; } - Ok(()) + Ok(state) } diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 1726f2ad07..047bceae7e 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,13 +1,14 @@ -use crate::{EthSpec, ExecPayload, ExecutionPayloadHeader, Uint256}; -use bls::blst_implementations::PublicKeyBytes; +use crate::{ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, Uint256}; +use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; use serde_derive::{Deserialize, Serialize}; use serde_with::{serde_as, DeserializeAs, SerializeAs}; use std::marker::PhantomData; +use tree_hash_derive::TreeHash; #[serde_as] -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload<E>")] pub struct BuilderBid<E: EthSpec, Payload: ExecPayload<E>> { #[serde_as(as = "BlindedPayloadAsHeader<E>")] @@ -16,9 +17,12 @@ pub struct BuilderBid<E: EthSpec, Payload: ExecPayload<E>> { pub value: Uint256, pub pubkey: PublicKeyBytes, #[serde(skip)] + #[tree_hash(skip_hashing)] _phantom_data: PhantomData<E>, } +impl<E: EthSpec, Payload: ExecPayload<E>> SignedRoot for BuilderBid<E, Payload> {} + /// Validator registration, for use in interacting with servers implementing the builder API. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload<E>")] @@ -50,3 +54,17 @@ impl<'de, E: EthSpec, Payload: ExecPayload<E>> DeserializeAs<'de, Payload> .map_err(|_| serde::de::Error::custom("unable to convert payload header to payload")) } } + +impl<E: EthSpec, Payload: ExecPayload<E>> SignedBuilderBid<E, Payload> { + pub fn verify_signature(&self, spec: &ChainSpec) -> bool { + self.message + .pubkey + .decompress() + .map(|pubkey| { + let domain = spec.get_builder_domain(); + let message = self.message.signing_root(domain); + self.signature.verify(&pubkey, message) + }) + .unwrap_or(false) + } +} diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 8a69505a51..3668d0524c 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1355,4 +1355,12 @@ mod yaml_tests { ) ); } + + #[test] + fn test_domain_builder() { + assert_eq!( + int_to_bytes4(ApplicationDomain::Builder.get_domain_constant()), + [0, 0, 0, 1] + ); + } } diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index dbfe218159..978bd4c69a 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -1,12 +1,14 @@ use crate::test_utils::TestRandom; use crate::Hash256; +use derivative::Derivative; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] +#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] +#[derivative(Debug = "transparent")] #[serde(transparent)] pub struct ExecutionBlockHash(Hash256); diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index a21eeb63c2..114ca02ecf 100644 --- 
a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -9,6 +9,7 @@ use std::hash::Hash; use test_random_derive::TestRandom; use tree_hash::TreeHash; +#[derive(Debug)] pub enum BlockType { Blinded, Full, @@ -18,6 +19,7 @@ pub trait ExecPayload<T: EthSpec>: Debug + Clone + Encode + + Debug + Decode + TestRandom + TreeHash @@ -44,6 +46,8 @@ pub trait ExecPayload<T: EthSpec>: fn block_number(&self) -> u64; fn timestamp(&self) -> u64; fn block_hash(&self) -> ExecutionBlockHash; + fn fee_recipient(&self) -> Address; + fn gas_limit(&self) -> u64; } impl<T: EthSpec> ExecPayload<T> for FullPayload<T> { @@ -74,6 +78,14 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> { fn block_hash(&self) -> ExecutionBlockHash { self.execution_payload.block_hash } + + fn fee_recipient(&self) -> Address { + self.execution_payload.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.execution_payload.gas_limit + } } impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> { @@ -104,6 +116,14 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> { fn block_hash(&self) -> ExecutionBlockHash { self.execution_payload_header.block_hash } + + fn fee_recipient(&self) -> Address { + self.execution_payload_header.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.execution_payload_header.gas_limit + } } #[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)] diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 55135a8a26..43396dedc0 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -129,6 +129,7 @@ macro_rules! 
impl_test_random_for_u8_array { }; } +impl_test_random_for_u8_array!(3); impl_test_random_for_u8_array!(4); impl_test_random_for_u8_array!(32); impl_test_random_for_u8_array!(48); diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index 7490ab6093..eb92d252d1 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -8,9 +8,9 @@ description = "Hashing primitives used in Ethereum 2.0" [dependencies] lazy_static = { version = "1.4.0", optional = true } +cpufeatures = { version = "0.2.2", optional = true } ring = "0.16.19" sha2 = "0.10.2" -cpufeatures = "0.2.2" [dev-dependencies] rustc-hex = "2.1.0" @@ -19,5 +19,6 @@ rustc-hex = "2.1.0" wasm-bindgen-test = "0.3.18" [features] -default = ["zero_hash_cache"] +default = ["zero_hash_cache", "detect-cpufeatures"] zero_hash_cache = ["lazy_static"] +detect-cpufeatures = ["cpufeatures"] diff --git a/crypto/eth2_hashing/src/lib.rs b/crypto/eth2_hashing/src/lib.rs index c5c034640b..36a3d14139 100644 --- a/crypto/eth2_hashing/src/lib.rs +++ b/crypto/eth2_hashing/src/lib.rs @@ -127,15 +127,15 @@ pub enum DynamicImpl { // Runtime latch for detecting the availability of SHA extensions on x86_64. // // Inspired by the runtime switch within the `sha2` crate itself. 
-#[cfg(target_arch = "x86_64")] +#[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] cpufeatures::new!(x86_sha_extensions, "sha", "sse2", "ssse3", "sse4.1"); #[inline(always)] pub fn have_sha_extensions() -> bool { - #[cfg(target_arch = "x86_64")] + #[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] return x86_sha_extensions::get(); - #[cfg(not(target_arch = "x86_64"))] + #[cfg(not(all(feature = "detect-cpufeatures", target_arch = "x86_64")))] return false; } diff --git a/crypto/eth2_keystore/src/json_keystore/kdf_module.rs b/crypto/eth2_keystore/src/json_keystore/kdf_module.rs index a1295e859c..94aeab0682 100644 --- a/crypto/eth2_keystore/src/json_keystore/kdf_module.rs +++ b/crypto/eth2_keystore/src/json_keystore/kdf_module.rs @@ -58,9 +58,10 @@ impl Kdf { } /// PRF for use in `pbkdf2`. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Default)] pub enum Prf { #[serde(rename = "hmac-sha256")] + #[default] HmacSha256, } @@ -73,12 +74,6 @@ impl Prf { } } -impl Default for Prf { - fn default() -> Self { - Prf::HmacSha256 - } -} - /// Parameters for `pbkdf2` key derivation. 
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 037171097d..dfc8aac7bd 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.3.2-rc.0" +version = "2.5.0" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" @@ -37,3 +37,4 @@ web3 = { version = "0.18.0", default-features = false, features = ["http-tls", " eth1_test_rig = { path = "../testing/eth1_test_rig" } sensitive_url = { path = "../common/sensitive_url" } eth2 = { path = "../common/eth2" } +snap = "1.0.1" diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 255f96eec1..2a0e5a9d47 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.58.1-bullseye AS builder +FROM rust:1.62.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake COPY . 
lighthouse ARG PORTABLE diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index 3f272780db..5d988ee181 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -1,7 +1,9 @@ use clap::ArgMatches; use clap_utils::parse_required; use serde::Serialize; +use snap::raw::Decoder; use ssz::Decode; +use std::fs; use std::fs::File; use std::io::Read; use std::str::FromStr; @@ -29,11 +31,18 @@ pub fn run_parse_ssz<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> { let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; let format = parse_required(matches, "format")?; - let mut bytes = vec![]; - let mut file = - File::open(filename).map_err(|e| format!("Unable to open {}: {}", filename, e))?; - file.read_to_end(&mut bytes) - .map_err(|e| format!("Unable to read {}: {}", filename, e))?; + let bytes = if filename.ends_with("ssz_snappy") { + let bytes = fs::read(filename).unwrap(); + let mut decoder = Decoder::new(); + decoder.decompress_vec(&bytes).unwrap() + } else { + let mut bytes = vec![]; + let mut file = + File::open(filename).map_err(|e| format!("Unable to open {}: {}", filename, e))?; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {}: {}", filename, e))?; + bytes + }; info!("Using {} spec", T::spec_name()); info!("Type: {:?}", type_str); diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index f7742ef0b9..da4ca81884 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "lighthouse" -version = "2.3.2-rc.0" +version = "2.5.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false -rust-version = "1.58" +rust-version = "1.62" [features] # Writes debugging .ssz files to /tmp during block processing. 
diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 06b0303c69..696830a0d1 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -494,6 +494,8 @@ fn validator_import_launchpad() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -614,6 +616,8 @@ fn validator_import_launchpad_no_password_then_add_password() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -638,6 +642,8 @@ fn validator_import_launchpad_no_password_then_add_password() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: dst_keystore_dir.join(KEYSTORE_NAME), @@ -738,6 +744,8 @@ fn validator_import_launchpad_password_file() { voting_public_key: keystore.public_key().unwrap(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index a9f8900d0c..0236ba6589 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -132,6 +132,37 @@ fn fork_choice_before_proposal_timeout_zero() { .with_config(|config| assert_eq!(config.chain.fork_choice_before_proposal_timeout_ms, 0)); } +#[test] +fn count_unrealized_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| 
assert!(config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_no_arg() { + CommandLineTest::new() + .flag("count-unrealized", None) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_false() { + CommandLineTest::new() + .flag("count-unrealized", Some("false")) + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_true() { + CommandLineTest::new() + .flag("count-unrealized", Some("true")) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.count_unrealized)); +} + #[test] fn freezer_dir_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); @@ -394,25 +425,36 @@ fn merge_fee_recipient_flag() { fn run_payload_builder_flag_test(flag: &str, builders: &str) { use sensitive_url::SensitiveUrl; - let dir = TempDir::new().expect("Unable to create temporary directory"); let all_builders: Vec<_> = builders .split(",") .map(|builder| SensitiveUrl::parse(builder).expect("valid builder url")) .collect(); - CommandLineTest::new() - .flag("execution-endpoint", Some("http://meow.cats")) + run_payload_builder_flag_test_with_config(flag, builders, None, None, |config| { + let config = config.execution_layer.as_ref().unwrap(); + // Only first provided endpoint is parsed as we don't support + // redundancy. 
+ assert_eq!(config.builder_url, all_builders.get(0).cloned()); + }) +} +fn run_payload_builder_flag_test_with_config<F: Fn(&Config)>( + flag: &str, + builders: &str, + additional_flag: Option<&str>, + additional_flag_value: Option<&str>, + f: F, +) { + let dir = TempDir::new().expect("Unable to create temporary directory"); + let mut test = CommandLineTest::new(); + test.flag("execution-endpoint", Some("http://meow.cats")) .flag( "execution-jwt", dir.path().join("jwt-file").as_os_str().to_str(), ) - .flag(flag, Some(builders)) - .run_with_zero_port() - .with_config(|config| { - let config = config.execution_layer.as_ref().unwrap(); - // Only first provided endpoint is parsed as we don't support - // redundancy. - assert_eq!(config.builder_url, all_builders.get(0).cloned()); - }); + .flag(flag, Some(builders)); + if let Some(additional_flag_name) = additional_flag { + test.flag(additional_flag_name, additional_flag_value); + } + test.run_with_zero_port().with_config(f); } #[test] @@ -420,7 +462,46 @@ fn payload_builder_flags() { run_payload_builder_flag_test("builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); - run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); +} + +#[test] +fn builder_fallback_flags() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-skips"), + Some("7"), + |config| { + assert_eq!(config.chain.builder_fallback_skips, 7); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-skips-per-epoch"), + Some("11"), + |config| { + assert_eq!(config.chain.builder_fallback_skips_per_epoch, 11); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-epochs-since-finalization"), + Some("4"), + |config| { + 
assert_eq!(config.chain.builder_fallback_epochs_since_finalization, 4); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-disable-checks"), + None, + |config| { + assert_eq!(config.chain.builder_fallback_disable_checks, true); + }, + ); } fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 61c239f86d..21dc4d7872 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -249,66 +249,6 @@ fn fee_recipient_flag() { ) }); } -#[test] -fn fee_recipient_file_flag() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - let mut file = - File::create(dir.path().join("fee_recipient.txt")).expect("Unable to create file"); - let new_key = Keypair::random(); - let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = "default:0x00000000219ab540356cbb839cbe05303d7705fa"; - file.write_all(contents.as_bytes()) - .expect("Unable to write to file"); - CommandLineTest::new() - .flag( - "suggested-fee-recipient-file", - dir.path().join("fee_recipient.txt").as_os_str().to_str(), - ) - .run() - .with_config(|config| { - // Public key not present so load default. 
- assert_eq!( - config - .fee_recipient_file - .clone() - .unwrap() - .load_fee_recipient(&pubkeybytes) - .unwrap(), - Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap()) - ) - }); -} -#[test] -fn fee_recipient_file_with_pk_flag() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - let mut file = - File::create(dir.path().join("fee_recipient.txt")).expect("Unable to create file"); - let new_key = Keypair::random(); - let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = format!( - "{}:0x00000000219ab540356cbb839cbe05303d7705fa", - pubkeybytes.to_string() - ); - file.write_all(contents.as_bytes()) - .expect("Unable to write to file"); - CommandLineTest::new() - .flag( - "suggested-fee-recipient-file", - dir.path().join("fee_recipient.txt").as_os_str().to_str(), - ) - .run() - .with_config(|config| { - assert_eq!( - config - .fee_recipient_file - .clone() - .unwrap() - .load_fee_recipient(&pubkeybytes) - .unwrap(), - Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap()) - ) - }); -} // Tests for HTTP flags. 
#[test] @@ -448,3 +388,58 @@ fn no_doppelganger_protection_flag() { .run() .with_config(|config| assert!(!config.enable_doppelganger_protection)); } +#[test] +fn no_gas_limit_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(config.gas_limit.is_none())); +} +#[test] +fn gas_limit_flag() { + CommandLineTest::new() + .flag("gas-limit", Some("600")) + .flag("builder-proposals", None) + .run() + .with_config(|config| assert_eq!(config.gas_limit, Some(600))); +} +#[test] +fn no_builder_proposals_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.builder_proposals)); +} +#[test] +fn builder_proposals_flag() { + CommandLineTest::new() + .flag("builder-proposals", None) + .run() + .with_config(|config| assert!(config.builder_proposals)); +} +#[test] +fn no_builder_registration_timestamp_override_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(config.builder_registration_timestamp_override.is_none())); +} +#[test] +fn builder_registration_timestamp_override_flag() { + CommandLineTest::new() + .flag("builder-registration-timestamp-override", Some("100")) + .run() + .with_config(|config| { + assert_eq!(config.builder_registration_timestamp_override, Some(100)) + }); +} +#[test] +fn strict_fee_recipient_flag() { + CommandLineTest::new() + .flag("strict-fee-recipient", None) + .run() + .with_config(|config| assert!(config.strict_fee_recipient)); +} +#[test] +fn no_strict_fee_recipient_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.strict_fee_recipient)); +} diff --git a/scripts/local_testnet/print_logs.sh b/scripts/local_testnet/dump_logs.sh similarity index 83% rename from scripts/local_testnet/print_logs.sh rename to scripts/local_testnet/dump_logs.sh index 2a9e7822a6..dc5f4edd38 100755 --- a/scripts/local_testnet/print_logs.sh +++ b/scripts/local_testnet/dump_logs.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Print the tail of all the logs output from local testnet +# 
Print all the logs output from local testnet set -Eeuo pipefail @@ -12,6 +12,6 @@ do echo "=============================================================================" echo "$f" echo "=============================================================================" - tail "$f" + cat "$f" echo "" done diff --git a/scripts/local_testnet/kill_processes.sh b/scripts/local_testnet/kill_processes.sh index be6b7f3d66..d63725ac14 100755 --- a/scripts/local_testnet/kill_processes.sh +++ b/scripts/local_testnet/kill_processes.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Kill processes -set -Eeuo pipefail +set -Euo pipefail # First parameter is the file with # one pid per line. diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index a5c6c0b5eb..dcc0a5382a 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -5,14 +5,19 @@ set -Eeuo pipefail source ./vars.env +# Set a higher ulimit in case we want to import 1000s of validators. 
+ulimit -n 65536 + # VC_COUNT is defaulted in vars.env DEBUG_LEVEL=${DEBUG_LEVEL:-info} +BUILDER_PROPOSALS= # Get options -while getopts "v:d:h" flag; do +while getopts "v:d:ph" flag; do case "${flag}" in v) VC_COUNT=${OPTARG};; d) DEBUG_LEVEL=${OPTARG};; + p) BUILDER_PROPOSALS="-p";; h) validators=$(( $VALIDATOR_COUNT / $BN_COUNT )) echo "Start local testnet, defaults: 1 eth1 node, $BN_COUNT beacon nodes," @@ -23,6 +28,7 @@ while getopts "v:d:h" flag; do echo "Options:" echo " -v: VC_COUNT default: $VC_COUNT" echo " -d: DEBUG_LEVEL default: info" + echo " -p: enable private tx proposals" echo " -h: this help" exit ;; @@ -113,7 +119,7 @@ done # Start requested number of validator clients for (( vc=1; vc<=$VC_COUNT; vc++ )); do - execute_command_add_PID validator_node_$vc.log ./validator_client.sh $DATADIR/node_$vc http://localhost:$((BN_http_port_base + $vc)) $DEBUG_LEVEL + execute_command_add_PID validator_node_$vc.log ./validator_client.sh $BUILDER_PROPOSALS -d $DEBUG_LEVEL $DATADIR/node_$vc http://localhost:$((BN_http_port_base + $vc)) done echo "Started!" 
diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index 5aa75dfe2d..975a2a6753 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -10,13 +10,24 @@ set -Eeuo pipefail source ./vars.env -DEBUG_LEVEL=${3:-info} +DEBUG_LEVEL=info + +BUILDER_PROPOSALS= + +# Get options +while getopts "pd:" flag; do + case "${flag}" in + p) BUILDER_PROPOSALS="--builder-proposals";; + d) DEBUG_LEVEL=${OPTARG};; + esac +done exec lighthouse \ --debug-level $DEBUG_LEVEL \ vc \ - --datadir $1 \ + $BUILDER_PROPOSALS \ + --datadir ${@:$OPTIND:1} \ --testnet-dir $TESTNET_DIR \ --init-slashing-protection \ - --beacon-nodes $2 \ + --beacon-nodes ${@:$OPTIND+1:1} \ $VC_ARGS diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index efb1046452..b6ea89794f 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -18,7 +18,7 @@ GENESIS_VALIDATOR_COUNT=80 # Number of beacon_node instances that you intend to run BN_COUNT=4 -# Number of valicator clients +# Number of validator clients VC_COUNT=$BN_COUNT # Number of seconds to delay to start genesis block. diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index d51fe2aef2..376fe3d8c5 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -18,7 +18,7 @@ GENESIS_VALIDATOR_COUNT=80 # Number of beacon_node instances that you intend to run BN_COUNT=4 -# Number of valicator clients +# Number of validator clients VC_COUNT=$BN_COUNT # Number of seconds to delay to start genesis block. 
diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index 61b95397d7..32e2d5648d 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,9 +1,10 @@ -FROM rust:1.58.1-bullseye AS builder +FROM rust:1.62.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse -# build lighthouse directly with a cargo build command, bypassing the makefile -RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse +# Build lighthouse directly with a cargo build command, bypassing the Makefile. +# We have to use nightly in order to disable the new LLVM pass manager. 
+RUN rustup default nightly-2022-07-26 && cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Znew-llvm-pass-manager=no -Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse # build lcli binary directly with cargo install command, bypassing the makefile RUN cargo install --path /lighthouse/lcli --force --locked diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 13d8f631cc..b237bfb761 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.1.10 +TESTS_TAG := v1.2.0-rc.1 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 2eb4ce5407..87953a6141 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -33,6 +33,8 @@ excluded_paths = [ "tests/.*/.*/ssz_static/LightClientSnapshot", # Merkle-proof tests for light clients "tests/.*/.*/merkle/single_proof", + # Capella tests are disabled for now. 
+ "tests/.*/capella", # One of the EF researchers likes to pack the tarballs on a Mac ".*\.DS_Store.*" ] diff --git a/testing/ef_tests/src/bls_setting.rs b/testing/ef_tests/src/bls_setting.rs index add7d8b7bd..24aaf60080 100644 --- a/testing/ef_tests/src/bls_setting.rs +++ b/testing/ef_tests/src/bls_setting.rs @@ -2,20 +2,15 @@ use self::BlsSetting::*; use crate::error::Error; use serde_repr::Deserialize_repr; -#[derive(Deserialize_repr, Debug, Clone, Copy)] +#[derive(Deserialize_repr, Debug, Clone, Copy, Default)] #[repr(u8)] pub enum BlsSetting { + #[default] Flexible = 0, Required = 1, Ignored = 2, } -impl Default for BlsSetting { - fn default() -> Self { - Flexible - } -} - impl BlsSetting { /// Check the BLS setting and skip the test if it isn't compatible with the crypto config. pub fn check(self) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index 08722c8e46..0283d13da4 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -88,17 +88,23 @@ impl<E: EthSpec> EpochTransition<E> for JustificationAndFinalization { BeaconState::Base(_) => { let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; validator_statuses.process_attestations(state)?; - base::process_justification_and_finalization( - state, - &validator_statuses.total_balances, - spec, - ) + let justification_and_finalization_state = + base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) } BeaconState::Altair(_) | BeaconState::Merge(_) => { - altair::process_justification_and_finalization( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - ) + let justification_and_finalization_state = + altair::process_justification_and_finalization( + state, + &altair::ParticipationCache::new(state, 
spec).unwrap(), + )?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) } } } @@ -270,7 +276,8 @@ impl<E: EthSpec, T: EpochTransition<E>> Case for EpochProcessing<E, T> { && T::name() != "inactivity_updates" && T::name() != "participation_flag_updates" } - ForkName::Altair | ForkName::Merge => true, // TODO: revisit when tests are out + // No phase0 tests for Altair and later. + ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 4f9f4dacad..65872efbe9 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,7 +7,7 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, CachedHead, + BeaconChainTypes, CachedHead, CountUnrealized, }; use serde_derive::Deserialize; use ssz_derive::Decode; @@ -16,8 +16,8 @@ use std::future::Future; use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecutionBlockHash, ForkName, - Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec, + ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -45,16 +45,19 @@ pub struct Checks { justified_checkpoint_root: Option<Hash256>, finalized_checkpoint: Option<Checkpoint>, best_justified_checkpoint: Option<Checkpoint>, + u_justified_checkpoint: Option<Checkpoint>, + u_finalized_checkpoint: Option<Checkpoint>, proposer_boost_root: Option<Hash256>, } #[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step<B, A, P> { +pub enum Step<B, A, AS, P> { Tick { tick: u64 }, 
ValidBlock { block: B }, MaybeValidBlock { block: B, valid: bool }, Attestation { attestation: A }, + AttesterSlashing { attester_slashing: AS }, PowBlock { pow_block: P }, Checks { checks: Box<Checks> }, } @@ -71,16 +74,8 @@ pub struct ForkChoiceTest<E: EthSpec> { pub description: String, pub anchor_state: BeaconState<E>, pub anchor_block: BeaconBlock<E>, - pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, PowBlock>>, -} - -/// Spec for fork choice tests, with proposer boosting enabled. -/// -/// This function can be deleted once `ChainSpec::mainnet` enables proposer boosting by default. -pub fn fork_choice_spec<E: EthSpec>(fork_name: ForkName) -> ChainSpec { - let mut spec = testing_spec::<E>(fork_name); - spec.proposer_score_boost = Some(70); - spec + #[allow(clippy::type_complexity)] + pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, AttesterSlashing<E>, PowBlock>>, } impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { @@ -92,8 +87,9 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { .to_str() .expect("path must be valid OsStr") .to_string(); - let spec = &fork_choice_spec::<E>(fork_name); - let steps: Vec<Step<String, String, String>> = yaml_decode_file(&path.join("steps.yaml"))?; + let spec = &testing_spec::<E>(fork_name); + let steps: Vec<Step<String, String, String, String>> = + yaml_decode_file(&path.join("steps.yaml"))?; // Resolve the object names in `steps.yaml` into actual decoded block/attestation objects. 
let steps = steps .into_iter() @@ -115,6 +111,10 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))) .map(|attestation| Step::Attestation { attestation }) } + Step::AttesterSlashing { attester_slashing } => { + ssz_decode_file(&path.join(format!("{}.ssz_snappy", attester_slashing))) + .map(|attester_slashing| Step::AttesterSlashing { attester_slashing }) + } Step::PowBlock { pow_block } => { ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) .map(|pow_block| Step::PowBlock { pow_block }) @@ -154,7 +154,7 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { - let tester = Tester::new(self, fork_choice_spec::<E>(fork_name))?; + let tester = Tester::new(self, testing_spec::<E>(fork_name))?; // TODO(merge): re-enable this test before production. // This test is skipped until we can do retrospective confirmations of the terminal @@ -171,6 +171,9 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { tester.process_block(block.clone(), *valid)? 
} Step::Attestation { attestation } => tester.process_attestation(attestation)?, + Step::AttesterSlashing { attester_slashing } => { + tester.process_attester_slashing(attester_slashing) + } Step::PowBlock { pow_block } => tester.process_pow_block(pow_block), Step::Checks { checks } => { let Checks { @@ -181,6 +184,8 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { justified_checkpoint_root, finalized_checkpoint, best_justified_checkpoint, + u_justified_checkpoint, + u_finalized_checkpoint, proposer_boost_root, } = checks.as_ref(); @@ -214,6 +219,14 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { .check_best_justified_checkpoint(*expected_best_justified_checkpoint)?; } + if let Some(expected_u_justified_checkpoint) = u_justified_checkpoint { + tester.check_u_justified_checkpoint(*expected_u_justified_checkpoint)?; + } + + if let Some(expected_u_finalized_checkpoint) = u_finalized_checkpoint { + tester.check_u_finalized_checkpoint(*expected_u_finalized_checkpoint)?; + } + if let Some(expected_proposer_boost_root) = proposer_boost_root { tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?; } @@ -300,8 +313,7 @@ impl<E: EthSpec> Tester<E> { fn find_head(&self) -> Result<CachedHead<E>, Error> { let chain = self.harness.chain.clone(); - self.block_on_dangerous(chain.recompute_head_at_current_slot())? 
- .map_err(|e| Error::InternalError(format!("failed to find head with {:?}", e)))?; + self.block_on_dangerous(chain.recompute_head_at_current_slot())?; Ok(self.harness.chain.canonical_head.cached_head()) } @@ -319,14 +331,18 @@ impl<E: EthSpec> Tester<E> { .chain .canonical_head .fork_choice_write_lock() - .update_time(slot) + .update_time(slot, &self.spec) .unwrap(); } pub fn process_block(&self, block: SignedBeaconBlock<E>, valid: bool) -> Result<(), Error> { let block_root = block.canonical_root(); let block = Arc::new(block); - let result = self.block_on_dangerous(self.harness.chain.process_block(block.clone()))?; + let result = self.block_on_dangerous( + self.harness + .chain + .process_block(block.clone(), CountUnrealized::True), + )?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. result: {:?}", @@ -384,6 +400,7 @@ impl<E: EthSpec> Tester<E> { &state, PayloadVerificationStatus::Irrelevant, &self.harness.chain.spec, + self.harness.chain.config.count_unrealized.into(), ); if result.is_ok() { @@ -416,6 +433,14 @@ impl<E: EthSpec> Tester<E> { .map_err(|e| Error::InternalError(format!("attestation import failed with {:?}", e))) } + pub fn process_attester_slashing(&self, attester_slashing: &AttesterSlashing<E>) { + self.harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_attester_slashing(attester_slashing) + } + pub fn process_pow_block(&self, pow_block: &PowBlock) { let el = self.harness.mock_execution_layer.as_ref().unwrap(); @@ -520,6 +545,40 @@ impl<E: EthSpec> Tester<E> { ) } + pub fn check_u_justified_checkpoint( + &self, + expected_checkpoint: Checkpoint, + ) -> Result<(), Error> { + let u_justified_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .unrealized_justified_checkpoint(); + check_equal( + "u_justified_checkpoint", + u_justified_checkpoint, + expected_checkpoint, + ) + } + + pub fn check_u_finalized_checkpoint( + 
&self, + expected_checkpoint: Checkpoint, + ) -> Result<(), Error> { + let u_finalized_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .unrealized_finalized_checkpoint(); + check_equal( + "u_finalized_checkpoint", + u_finalized_checkpoint, + expected_checkpoint, + ) + } + pub fn check_expected_proposer_boost_root( &self, expected_proposer_boost_root: Hash256, diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index f86148312f..798dae083b 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -17,8 +17,9 @@ use state_processing::per_block_processing::{ use std::fmt::Debug; use std::path::Path; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, ForkName, - FullPayload, ProposerSlashing, SignedVoluntaryExit, SyncAggregate, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit, + SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -255,6 +256,40 @@ impl<E: EthSpec> Operation<E> for FullPayload<E> { } } } +impl<E: EthSpec> Operation<E> for BlindedPayload<E> { + fn handler_name() -> String { + "execution_payload".into() + } + + fn filename() -> String { + "execution_payload.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair + } + + fn decode(path: &Path, _spec: &ChainSpec) -> Result<Self, Error> { + ssz_decode_file::<ExecutionPayload<E>>(path).map(Into::into) + } + + fn apply_to( + &self, + state: &mut BeaconState<E>, + spec: &ChainSpec, + extra: &Operations<E, Self>, + ) -> Result<(), BlockProcessingError> { + let valid = extra + .execution_metadata + .as_ref() + .map_or(false, |e| e.execution_valid); + if valid { + process_execution_payload(state, self, 
spec) + } else { + Err(BlockProcessingError::ExecutionInvalid) + } + } +} impl<E: EthSpec, O: Operation<E>> LoadCase for Operations<E, O> { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result<Self, Error> { diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 25299bf577..13c0a8c54a 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -52,7 +52,7 @@ pub trait Handler { .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) }; let test_cases = fs::read_dir(&handler_path) - .expect("handler dir exists") + .unwrap_or_else(|e| panic!("handler dir {} exists: {:?}", handler_path.display(), e)) .filter_map(as_directory) .flat_map(|suite| fs::read_dir(suite.path()).expect("suite dir exists")) .filter_map(as_directory) diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 540fe6903e..c075e89b3f 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -56,6 +56,7 @@ type_name!(Eth1Data); type_name_generic!(ExecutionPayload); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); +type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); type_name_generic!(HistoricalBatch); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index a36253f24e..31abbd1591 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -71,11 +71,17 @@ fn operations_sync_aggregate() { } #[test] -fn operations_execution_payload() { +fn operations_execution_payload_full() { OperationsHandler::<MinimalEthSpec, FullPayload<_>>::default().run(); OperationsHandler::<MainnetEthSpec, FullPayload<_>>::default().run(); } +#[test] +fn operations_execution_payload_blinded() { + OperationsHandler::<MinimalEthSpec, BlindedPayload<_>>::default().run(); + OperationsHandler::<MainnetEthSpec, BlindedPayload<_>>::default().run(); +} 
+ #[test] fn sanity_blocks() { SanityBlocksHandler::<MinimalEthSpec>::default().run(); @@ -377,8 +383,9 @@ fn epoch_processing_participation_record_updates() { #[test] fn epoch_processing_sync_committee_updates() { + // There are presently no mainnet tests, see: + // https://github.com/ethereum/consensus-spec-tests/issues/29 EpochProcessingHandler::<MinimalEthSpec, SyncCommitteeUpdates>::default().run(); - EpochProcessingHandler::<MainnetEthSpec, SyncCommitteeUpdates>::default().run(); } #[test] diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index fc8230c7a2..7a8d7e99b5 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -15,3 +15,9 @@ execution_layer = { path = "../../beacon_node/execution_layer" } sensitive_url = { path = "../../common/sensitive_url" } types = { path = "../../consensus/types" } unused_port = { path = "../../common/unused_port" } +ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +ethers-providers = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +deposit_contract = { path = "../../common/deposit_contract" } +reqwest = { version = "0.11.0", features = ["json"] } +hex = "0.4.2" +fork_choice = { path = "../../consensus/fork_choice" } diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index 7df88aa0d7..ad5af53158 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -1,3 +1,4 @@ +use ethers_providers::{Http, Provider}; use execution_layer::DEFAULT_JWT_FILE; use sensitive_url::SensitiveUrl; use std::path::PathBuf; @@ -5,6 +6,14 @@ use std::process::Child; use tempfile::TempDir; use unused_port::unused_tcp_port; +pub const 
KEYSTORE_PASSWORD: &str = "testpwd"; +pub const ACCOUNT1: &str = "7b8C3a386C0eea54693fFB0DA17373ffC9228139"; +pub const ACCOUNT2: &str = "dA2DD7560DB7e212B945fC72cEB54B7D8C886D77"; +pub const PRIVATE_KEYS: [&str; 2] = [ + "115fe42a60e5ef45f5490e599add1f03c73aeaca129c2c41451eca6cf8ff9e04", + "6a692e710077d9000be1326acbe32f777b403902ac8779b19eb1398b849c99c3", +]; + /// Defined for each EE type (e.g., Geth, Nethermind, etc). pub trait GenericExecutionEngine: Clone { fn init_datadir() -> TempDir; @@ -22,8 +31,10 @@ pub struct ExecutionEngine<E> { engine: E, #[allow(dead_code)] datadir: TempDir, + http_port: u16, http_auth_port: u16, child: Child, + pub provider: Provider<Http>, } impl<E> Drop for ExecutionEngine<E> { @@ -42,11 +53,15 @@ impl<E: GenericExecutionEngine> ExecutionEngine<E> { let http_port = unused_tcp_port().unwrap(); let http_auth_port = unused_tcp_port().unwrap(); let child = E::start_client(&datadir, http_port, http_auth_port, jwt_secret_path); + let provider = Provider::<Http>::try_from(format!("http://localhost:{}", http_port)) + .expect("failed to instantiate ethers provider"); Self { engine, datadir, + http_port, http_auth_port, child, + provider, } } @@ -54,6 +69,10 @@ impl<E: GenericExecutionEngine> ExecutionEngine<E> { SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_auth_port)).unwrap() } + pub fn http_url(&self) -> SensitiveUrl { + SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() + } + pub fn datadir(&self) -> PathBuf { self.datadir.path().to_path_buf() } diff --git a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs index 87fdaec14a..17654b292a 100644 --- a/testing/execution_engine_integration/src/genesis_json.rs +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -32,7 +32,12 @@ pub fn geth_genesis_json() -> Value { "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", 
"coinbase":"0x0000000000000000000000000000000000000000", "alloc":{ - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"} + "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { + "balance": "10000000000000000000000000" + }, + "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { + "balance": "10000000000000000000000000" + }, }, "number":"0x0", "gasUsed":"0x0", @@ -40,3 +45,87 @@ pub fn geth_genesis_json() -> Value { "baseFeePerGas":"0x7" }) } + +/// Modified kiln config +pub fn nethermind_genesis_json() -> Value { + json!( + { + "name": "lighthouse_test_network", + "engine": { + "Ethash": { + "params": { + "minimumDifficulty": "0x20000", + "difficultyBoundDivisor": "0x800", + "durationLimit": "0xd", + "blockReward": { + "0x0": "0x1BC16D674EC80000" + }, + "homesteadTransition": "0x0", + "eip100bTransition": "0x0", + "difficultyBombDelays": {} + } + } + }, + "params": { + "gasLimitBoundDivisor": "0x400", + "registrar": "0x0000000000000000000000000000000000000000", + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID": "0x1469ca", + "MergeForkIdTransition": "0x3e8", + "eip150Transition": "0x0", + "eip158Transition": "0x0", + "eip160Transition": "0x0", + "eip161abcTransition": "0x0", + "eip161dTransition": "0x0", + "eip155Transition": "0x0", + "eip140Transition": "0x0", + "eip211Transition": "0x0", + "eip214Transition": "0x0", + "eip658Transition": "0x0", + "eip145Transition": "0x0", + "eip1014Transition": "0x0", + "eip1052Transition": "0x0", + "eip1283Transition": "0x0", + "eip1283DisableTransition": "0x0", + "eip152Transition": "0x0", + "eip1108Transition": "0x0", + "eip1344Transition": "0x0", + "eip1884Transition": "0x0", + "eip2028Transition": "0x0", + "eip2200Transition": "0x0", + "eip2565Transition": "0x0", + "eip2929Transition": "0x0", + "eip2930Transition": "0x0", + "eip1559Transition": "0x0", + "eip3198Transition": "0x0", + "eip3529Transition": "0x0", + "eip3541Transition": "0x0" + }, + 
"genesis": { + "seal": { + "ethereum": { + "nonce": "0x1234", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty": "0x01", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "", + "gasLimit": "0x1C9C380" + }, + "accounts": { + "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { + "balance": "10000000000000000000000000" + }, + "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { + "balance": "10000000000000000000000000" + }, + }, + "nodes": [] + } + ) +} diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 129faea907..ae5210b2a3 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -7,7 +7,7 @@ use std::{env, fs::File}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const GETH_BRANCH: &str = "master"; +// const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { @@ -26,8 +26,13 @@ pub fn build(execution_clients_dir: &Path) { build_utils::clone_repo(execution_clients_dir, GETH_REPO_URL).unwrap(); } + // TODO: this should be set back to the latest release once the following issue is resolved: + // + // - https://github.com/ethereum/go-ethereum/issues/25427 + // // Get the latest tag on the branch - let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + // let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + let last_release = "v1.10.20"; build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth @@ -90,13 +95,14 @@ impl GenericExecutionEngine for GethEngine { .arg(datadir.path().to_str().unwrap()) .arg("--http") .arg("--http.api") - .arg("engine,eth") + 
.arg("engine,eth,personal") .arg("--http.port") .arg(http_port.to_string()) .arg("--authrpc.port") .arg(http_auth_port.to_string()) .arg("--port") .arg(network_port.to_string()) + .arg("--allow-insecure-unlock") .arg("--authrpc.jwtsecret") .arg(jwt_secret_path.as_path().to_str().unwrap()) .stdout(build_utils::build_stdio()) diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index a4ec0f9215..bd3436602c 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -1,3 +1,4 @@ +#![recursion_limit = "1024"] /// This binary runs integration tests between Lighthouse and execution engines. /// /// It will first attempt to build any supported integration clients, then it will run tests. @@ -9,6 +10,7 @@ mod genesis_json; mod geth; mod nethermind; mod test_rig; +mod transactions; use geth::GethEngine; use nethermind::NethermindEngine; diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index df345f36be..1fe7bf0f05 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -1,6 +1,8 @@ use crate::build_utils; use crate::execution_engine::GenericExecutionEngine; +use crate::genesis_json::nethermind_genesis_json; use std::env; +use std::fs::File; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; use tempfile::TempDir; @@ -69,33 +71,43 @@ impl NethermindEngine { impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { - TempDir::new().unwrap() + let datadir = TempDir::new().unwrap(); + let genesis_json_path = datadir.path().join("genesis.json"); + let mut file = File::create(&genesis_json_path).unwrap(); + let json = nethermind_genesis_json(); + serde_json::to_writer(&mut file, &json).unwrap(); + datadir } fn start_client( datadir: &TempDir, - _http_port: u16, + 
http_port: u16, http_auth_port: u16, jwt_secret_path: PathBuf, ) -> Child { let network_port = unused_tcp_port().unwrap(); + let genesis_json_path = datadir.path().join("genesis.json"); Command::new(Self::binary_path()) .arg("--datadir") .arg(datadir.path().to_str().unwrap()) .arg("--config") .arg("kiln") + .arg("--Init.ChainSpecPath") + .arg(genesis_json_path.to_str().unwrap()) .arg("--Merge.TerminalTotalDifficulty") .arg("0") + .arg("--JsonRpc.Enabled") + .arg("true") + .arg("--JsonRpc.EnabledModules") + .arg("net,eth,subscribe,web3,admin,personal") + .arg("--JsonRpc.Port") + .arg(http_port.to_string()) .arg("--JsonRpc.AdditionalRpcUrls") .arg(format!( "http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client", http_auth_port )) - .arg("--JsonRpc.EnabledModules") - .arg("net,eth,subscribe,web3,admin,engine") - .arg("--JsonRpc.Port") - .arg(http_auth_port.to_string()) .arg("--Network.DiscoveryPort") .arg(network_port.to_string()) .arg("--Network.P2PPort") diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 5b23af4fa1..0aa960bc41 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -1,14 +1,23 @@ -use crate::execution_engine::{ExecutionEngine, GenericExecutionEngine}; -use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; +use crate::execution_engine::{ + ExecutionEngine, GenericExecutionEngine, ACCOUNT1, ACCOUNT2, KEYSTORE_PASSWORD, PRIVATE_KEYS, +}; +use crate::transactions::transactions; +use ethers_providers::Middleware; +use execution_layer::{ + BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadStatus, +}; +use fork_choice::ForkchoiceUpdateParameters; +use reqwest::{header::CONTENT_TYPE, Client}; +use sensitive_url::SensitiveUrl; +use serde_json::{json, Value}; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use 
task_executor::TaskExecutor; use tokio::time::sleep; use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - MainnetEthSpec, Slot, Uint256, + MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; - const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); struct ExecutionPair<E, T: EthSpec> { @@ -32,6 +41,63 @@ pub struct TestRig<E, T: EthSpec = MainnetEthSpec> { _runtime_shutdown: exit_future::Signal, } +/// Import a private key into the execution engine and unlock it so that we can +/// make transactions with the corresponding account. +async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: &str) { + for priv_key in priv_keys { + let body = json!( + { + "jsonrpc":"2.0", + "method":"personal_importRawKey", + "params":[priv_key, password], + "id":1 + } + ); + + let client = Client::builder().build().unwrap(); + let request = client + .post(http_url.full.clone()) + .header(CONTENT_TYPE, "application/json") + .json(&body); + + let response: Value = request + .send() + .await + .unwrap() + .error_for_status() + .unwrap() + .json() + .await + .unwrap(); + + let account = response.get("result").unwrap().as_str().unwrap(); + + let body = json!( + { + "jsonrpc":"2.0", + "method":"personal_unlockAccount", + "params":[account, password], + "id":1 + } + ); + + let request = client + .post(http_url.full.clone()) + .header(CONTENT_TYPE, "application/json") + .json(&body); + + let _response: Value = request + .send() + .await + .unwrap() + .error_for_status() + .unwrap() + .json() + .await + .unwrap(); + } +} + impl<E: GenericExecutionEngine> TestRig<E> { pub fn new(generic_engine: E) -> Self { let log = environment::null_logger().unwrap(); @@ -125,6 +191,20 @@ impl<E: GenericExecutionEngine> TestRig<E> { pub async fn perform_tests(&self) { self.wait_until_synced().await; + // Import and unlock all private keys to sign transactions + let _ = futures::future::join_all([&self.ee_a, 
&self.ee_b].iter().map(|ee| { + import_and_unlock( + ee.execution_engine.http_url(), + &PRIVATE_KEYS, + KEYSTORE_PASSWORD, + ) + })) + .await; + + // We hardcode the accounts here since some EEs start with a default unlocked account + let account1 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT1).unwrap()); + let account2 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT2).unwrap()); + /* * Check the transition config endpoint. */ @@ -142,7 +222,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { let terminal_pow_block_hash = self .ee_a .execution_layer - .get_terminal_pow_block_hash(&self.spec) + .get_terminal_pow_block_hash(&self.spec, timestamp_now()) .await .unwrap() .unwrap(); @@ -151,12 +231,23 @@ impl<E: GenericExecutionEngine> TestRig<E> { terminal_pow_block_hash, self.ee_b .execution_layer - .get_terminal_pow_block_hash(&self.spec) + .get_terminal_pow_block_hash(&self.spec, timestamp_now()) .await .unwrap() .unwrap() ); + // Submit transactions before getting payload + let txs = transactions::<MainnetEthSpec>(account1, account2); + for tx in txs.clone().into_iter() { + self.ee_a + .execution_engine + .provider + .send_transaction(tx, None) + .await + .unwrap(); + } + /* * Execution Engine A: * @@ -166,8 +257,61 @@ impl<E: GenericExecutionEngine> TestRig<E> { let parent_hash = terminal_pow_block_hash; let timestamp = timestamp_now(); let prev_randao = Hash256::zero(); + let head_root = Hash256::zero(); + let justified_block_hash = ExecutionBlockHash::zero(); let finalized_block_hash = ExecutionBlockHash::zero(); + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root, + head_hash: Some(parent_hash), + justified_hash: Some(justified_block_hash), + finalized_hash: Some(finalized_block_hash), + }; let proposer_index = 0; + + let prepared = self + .ee_a + .execution_layer + .insert_proposer( + Slot::new(1), // Insert proposer for the next slot + head_root, + proposer_index, + PayloadAttributes { + timestamp, + 
prev_randao, + suggested_fee_recipient: Address::zero(), + }, + ) + .await; + + assert!(!prepared, "Inserting proposer for the first time"); + + // Make a fcu call with the PayloadAttributes that we inserted previously + let prepare = self + .ee_a + .execution_layer + .notify_forkchoice_updated( + parent_hash, + justified_block_hash, + finalized_block_hash, + Slot::new(0), + Hash256::zero(), + ) + .await + .unwrap(); + + assert_eq!(prepare, PayloadStatus::Valid); + + // Add a delay to give the EE sufficient time to pack the + // submitted transactions into a payload. + // This is required when running on under resourced nodes and + // in CI. + sleep(Duration::from_secs(3)).await; + + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot: Slot::new(0), + chain_health: ChainHealth::Healthy, + }; let valid_payload = self .ee_a .execution_layer @@ -175,15 +319,17 @@ impl<E: GenericExecutionEngine> TestRig<E> { parent_hash, timestamp, prev_randao, - finalized_block_hash, proposer_index, - None, - Slot::new(0), + forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() .execution_payload; + assert_eq!(valid_payload.transactions.len(), txs.len()); + /* * Execution Engine A: * @@ -197,7 +343,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_a .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Syncing); @@ -231,7 +383,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_a .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, 
PayloadStatus::Valid); @@ -261,8 +419,12 @@ impl<E: GenericExecutionEngine> TestRig<E> { let parent_hash = valid_payload.block_hash; let timestamp = valid_payload.timestamp + 1; let prev_randao = Hash256::zero(); - let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot: Slot::new(0), + chain_health: ChainHealth::Healthy, + }; let second_payload = self .ee_a .execution_layer @@ -270,10 +432,10 @@ impl<E: GenericExecutionEngine> TestRig<E> { parent_hash, timestamp, prev_randao, - finalized_block_hash, proposer_index, - None, - Slot::new(0), + forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() @@ -316,7 +478,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_a .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -332,7 +500,11 @@ impl<E: GenericExecutionEngine> TestRig<E> { .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatus::Accepted); + // TODO: we should remove the `Accepted` status here once Geth fixes it + assert!(matches!( + status, + PayloadStatus::Syncing | PayloadStatus::Accepted + )); /* * Execution Engine B: @@ -346,7 +518,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_b .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Syncing); @@ -392,7 +570,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_b .execution_layer - 
.notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs new file mode 100644 index 0000000000..144946682b --- /dev/null +++ b/testing/execution_engine_integration/src/transactions.rs @@ -0,0 +1,87 @@ +use deposit_contract::{encode_eth1_tx_data, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS}; +use ethers_core::types::{ + transaction::{eip2718::TypedTransaction, eip2930::AccessList}, + Address, Bytes, Eip1559TransactionRequest, TransactionRequest, +}; +use types::{DepositData, EthSpec, Hash256, Keypair, Signature}; + +/// Hardcoded deposit contract address based on sender address and nonce +pub const DEPOSIT_CONTRACT_ADDRESS: &str = "64f43BEc7F86526686C931d65362bB8698872F90"; + +#[derive(Debug)] +pub enum Transaction { + Transfer(Address, Address), + TransferLegacy(Address, Address), + TransferAccessList(Address, Address), + DeployDepositContract(Address), + DepositDepositContract { + sender: Address, + deposit_contract_address: Address, + }, +} + +/// Get a list of transactions to publish to the execution layer. 
+pub fn transactions<E: EthSpec>(account1: Address, account2: Address) -> Vec<TypedTransaction> { + vec![ + Transaction::Transfer(account1, account2).transaction::<E>(), + Transaction::TransferLegacy(account1, account2).transaction::<E>(), + Transaction::TransferAccessList(account1, account2).transaction::<E>(), + Transaction::DeployDepositContract(account1).transaction::<E>(), + Transaction::DepositDepositContract { + sender: account1, + deposit_contract_address: ethers_core::types::Address::from_slice( + &hex::decode(&DEPOSIT_CONTRACT_ADDRESS).unwrap(), + ), + } + .transaction::<E>(), + ] +} + +impl Transaction { + pub fn transaction<E: EthSpec>(&self) -> TypedTransaction { + match &self { + Self::TransferLegacy(from, to) => TransactionRequest::new() + .from(*from) + .to(*to) + .value(1) + .into(), + Self::Transfer(from, to) => Eip1559TransactionRequest::new() + .from(*from) + .to(*to) + .value(1) + .into(), + Self::TransferAccessList(from, to) => TransactionRequest::new() + .from(*from) + .to(*to) + .value(1) + .with_access_list(AccessList::default()) + .into(), + Self::DeployDepositContract(addr) => TransactionRequest::new() + .from(*addr) + .data(Bytes::from(BYTECODE.to_vec())) + .gas(CONTRACT_DEPLOY_GAS) + .into(), + Self::DepositDepositContract { + sender, + deposit_contract_address, + } => { + let keypair = Keypair::random(); + + let mut deposit = DepositData { + pubkey: keypair.pk.into(), + withdrawal_credentials: Hash256::zero(), + amount: 32_000_000_000, + signature: Signature::empty().into(), + }; + + deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); + TransactionRequest::new() + .from(*sender) + .to(*deposit_contract_address) + .data(Bytes::from(encode_eth1_tx_data(&deposit).unwrap())) + .gas(DEPOSIT_GAS) + .into() + } + } + } +} diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 8e4b8595df..2c9bd5939f 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ 
-13,3 +13,4 @@ eth2 = { path = "../../common/eth2" } validator_client = { path = "../../validator_client" } validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } sensitive_url = { path = "../../common/sensitive_url" } +execution_layer = { path = "../../beacon_node/execution_layer" } \ No newline at end of file diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index acf9bb9e68..0933bff4c6 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -17,6 +17,9 @@ use validator_dir::insecure_keys::build_deterministic_validator_dirs; pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient}; pub use environment; pub use eth2; +pub use execution_layer::test_utils::{ + Config as MockServerConfig, MockExecutionConfig, MockServer, +}; pub use validator_client::Config as ValidatorConfig; /// The global timeout for HTTP requests to the beacon node. @@ -211,3 +214,29 @@ impl<E: EthSpec> LocalValidatorClient<E> { }) } } + +/// Provides an execution engine api server that is running in the current process on a given tokio executor (it +/// is _local_ to this process). +/// +/// Intended for use in testing and simulation. Not for production. 
+pub struct LocalExecutionNode<E: EthSpec> { + pub server: MockServer<E>, + pub datadir: TempDir, +} + +impl<E: EthSpec> LocalExecutionNode<E> { + pub fn new(context: RuntimeContext<E>, config: MockExecutionConfig) -> Self { + let datadir = TempBuilder::new() + .prefix("lighthouse_node_test_rig_el") + .tempdir() + .expect("should create temp directory for client datadir"); + let jwt_file_path = datadir.path().join("jwt.hex"); + if let Err(e) = std::fs::write(&jwt_file_path, config.jwt_key.hex_string()) { + panic!("Failed to write jwt file {}", e); + } + Self { + server: MockServer::new_with_config(&context.executor.handle().unwrap(), config), + datadir, + } + } +} diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 7ff387b9c6..02f4f76d51 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,7 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, Slot, Unsigned}; +use types::{Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, Unsigned}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. @@ -149,19 +149,19 @@ pub async fn verify_fork_version<E: EthSpec>( network: LocalNetwork<E>, fork_epoch: Epoch, slot_duration: Duration, - altair_fork_version: [u8; 4], + fork_version: [u8; 4], ) -> Result<(), String> { epoch_delay(fork_epoch, slot_duration, E::slots_per_epoch()).await; for remote_node in network.remote_nodes()? 
{ - let fork_version = remote_node + let remote_fork_version = remote_node .get_beacon_states_fork(StateId::Head) .await .map(|resp| resp.unwrap().data.current_version) .map_err(|e| format!("Failed to get fork from beacon node: {:?}", e))?; - if fork_version != altair_fork_version { + if fork_version != remote_fork_version { return Err(format!( "Fork version after FORK_EPOCH is incorrect, got: {:?}, expected: {:?}", - fork_version, altair_fork_version, + remote_fork_version, fork_version, )); } } @@ -207,3 +207,39 @@ pub async fn verify_full_sync_aggregates_up_to<E: EthSpec>( Ok(()) } + +/// Verify that the first merged PoS block got finalized. +pub async fn verify_transition_block_finalized<E: EthSpec>( + network: LocalNetwork<E>, + transition_epoch: Epoch, + slot_duration: Duration, + should_verify: bool, +) -> Result<(), String> { + if !should_verify { + return Ok(()); + } + epoch_delay(transition_epoch + 2, slot_duration, E::slots_per_epoch()).await; + let mut block_hashes = Vec::new(); + for remote_node in network.remote_nodes()?.iter() { + let execution_block_hash: ExecutionBlockHash = remote_node + .get_beacon_blocks::<E>(BlockId::Finalized) + .await + .map(|body| body.unwrap().data) + .map_err(|e| format!("Get state root via http failed: {:?}", e))? 
+ .message() + .execution_payload() + .map(|payload| payload.execution_payload.block_hash) + .map_err(|e| format!("Execution payload does not exist: {:?}", e))?; + block_hashes.push(execution_block_hash); + } + + let first = block_hashes[0]; + if first.into_root() != Hash256::zero() && block_hashes.iter().all(|&item| item == first) { + Ok(()) + } else { + Err(format!( + "Terminal block not finalized on all nodes Finalized block hashes:{:?}", + block_hashes + )) + } +} diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 28f1a25627..f1196502fb 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -36,6 +36,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("3") .help("Speed up factor. Please use a divisor of 12.")) + .arg(Arg::with_name("post-merge") + .short("m") + .long("post-merge") + .takes_value(false) + .help("Simulate the merge transition")) .arg(Arg::with_name("continue_after_checks") .short("c") .long("continue_after_checks") diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 4c773c70bf..c54944c2e1 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,4 +1,4 @@ -use crate::local_network::INVALID_ADDRESS; +use crate::local_network::{EXECUTION_PORT, INVALID_ADDRESS, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; @@ -18,8 +18,12 @@ use std::time::Duration; use tokio::time::sleep; use types::{Epoch, EthSpec, MinimalEthSpec}; -const FORK_EPOCH: u64 = 2; const END_EPOCH: u64 = 16; +const ALTAIR_FORK_EPOCH: u64 = 1; +const BELLATRIX_FORK_EPOCH: u64 = 2; + +const SUGGESTED_FEE_RECIPIENT: [u8; 20] = + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); @@ 
-28,10 +32,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let speed_up_factor = value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default"); let continue_after_checks = matches.is_present("continue_after_checks"); + let post_merge_sim = matches.is_present("post-merge"); println!("Beacon Chain Simulator:"); println!(" nodes:{}", node_count); println!(" validators_per_node:{}", validators_per_node); + println!(" post merge simulation:{}", post_merge_sim); println!(" continue_after_checks:{}", continue_after_checks); // Generate the directories and keystores required for the validator clients. @@ -72,6 +78,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let total_validator_count = validators_per_node * node_count; let altair_fork_version = spec.altair_fork_version; + let bellatrix_fork_version = spec.bellatrix_fork_version; spec.seconds_per_slot /= speed_up_factor; spec.seconds_per_slot = max(1, spec.seconds_per_slot); @@ -80,8 +87,14 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { spec.min_genesis_time = 0; spec.min_genesis_active_validator_count = total_validator_count as u64; spec.seconds_per_eth1_block = eth1_block_time.as_secs(); - spec.altair_fork_epoch = Some(Epoch::new(FORK_EPOCH)); + spec.altair_fork_epoch = Some(Epoch::new(ALTAIR_FORK_EPOCH)); + // Set these parameters only if we are doing a merge simulation + if post_merge_sim { + spec.terminal_total_difficulty = TERMINAL_DIFFICULTY.into(); + spec.bellatrix_fork_epoch = Some(Epoch::new(BELLATRIX_FORK_EPOCH)); + } + let seconds_per_slot = spec.seconds_per_slot; let slot_duration = Duration::from_secs(spec.seconds_per_slot); let initial_validator_count = spec.min_genesis_active_validator_count as usize; let deposit_amount = env.eth2_config.spec.max_effective_balance; @@ -137,6 +150,19 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.network.enr_address = 
Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + if post_merge_sim { + let el_config = execution_layer::Config { + execution_endpoints: vec![SensitiveUrl::parse(&format!( + "http://localhost:{}", + EXECUTION_PORT + )) + .unwrap()], + ..Default::default() + }; + + beacon_config.execution_layer = Some(el_config); + } + /* * Create a new `LocalNetwork` with one beacon node. */ @@ -168,9 +194,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let network_1 = network.clone(); executor.spawn( async move { + let mut validator_config = testing_validator_config(); + if post_merge_sim { + validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into()); + } println!("Adding validator client {}", i); network_1 - .add_validator_client(testing_validator_config(), i, files, i % 2 == 0) + .add_validator_client(validator_config, i, files, i % 2 == 0) .await .expect("should add validator"); }, @@ -182,6 +212,21 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { println!("Duration to genesis: {}", duration_to_genesis.as_secs()); sleep(duration_to_genesis).await; + if post_merge_sim { + let executor = executor.clone(); + let network_2 = network.clone(); + executor.spawn( + async move { + println!("Mining pow blocks"); + let mut interval = tokio::time::interval(Duration::from_secs(seconds_per_slot)); + for i in 1..=TERMINAL_BLOCK + 1 { + interval.tick().await; + let _ = network_2.mine_pow_blocks(i); + } + }, + "pow_mining", + ); + } /* * Start the checks that ensure the network performs as expected. * @@ -190,7 +235,16 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * tests start at the right time. Whilst this is works well for now, it's subject to * breakage by changes to the VC. 
*/ - let (finalization, block_prod, validator_count, onboarding, fork, sync_aggregate) = futures::join!( + + let ( + finalization, + block_prod, + validator_count, + onboarding, + fork, + sync_aggregate, + transition, + ) = futures::join!( // Check that the chain finalizes at the first given opportunity. checks::verify_first_finalization(network.clone(), slot_duration), // Check that a block is produced at every slot. @@ -212,21 +266,36 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { slot_duration, total_validator_count, ), - // Check that all nodes have transitioned to the new fork. + // Check that all nodes have transitioned to the required fork. checks::verify_fork_version( network.clone(), - Epoch::new(FORK_EPOCH), + if post_merge_sim { + Epoch::new(BELLATRIX_FORK_EPOCH) + } else { + Epoch::new(ALTAIR_FORK_EPOCH) + }, slot_duration, - altair_fork_version + if post_merge_sim { + bellatrix_fork_version + } else { + altair_fork_version + } ), // Check that all sync aggregates are full. checks::verify_full_sync_aggregates_up_to( network.clone(), // Start checking for sync_aggregates at `FORK_EPOCH + 1` to account for // inefficiencies in finding subnet peers at the `fork_slot`. - Epoch::new(FORK_EPOCH + 1).start_slot(MinimalEthSpec::slots_per_epoch()), + Epoch::new(ALTAIR_FORK_EPOCH + 1).start_slot(MinimalEthSpec::slots_per_epoch()), Epoch::new(END_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()), slot_duration, + ), + // Check that the transition block is finalized. + checks::verify_transition_block_finalized( + network.clone(), + Epoch::new(TERMINAL_BLOCK / MinimalEthSpec::slots_per_epoch()), + slot_duration, + post_merge_sim ) ); @@ -236,6 +305,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { onboarding?; fork?; sync_aggregate?; + transition?; // The `final_future` either completes immediately or never completes, depending on the value // of `continue_after_checks`. 
diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 6cfc3e6db7..8df912ed16 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,7 +1,8 @@ use node_test_rig::{ environment::RuntimeContext, eth2::{types::StateId, BeaconNodeHttpClient}, - ClientConfig, LocalBeaconNode, LocalValidatorClient, ValidatorConfig, ValidatorFiles, + ClientConfig, LocalBeaconNode, LocalExecutionNode, LocalValidatorClient, MockExecutionConfig, + MockServerConfig, ValidatorConfig, ValidatorFiles, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -15,11 +16,17 @@ use types::{Epoch, EthSpec}; const BOOTNODE_PORT: u16 = 42424; pub const INVALID_ADDRESS: &str = "http://127.0.0.1:42423"; +pub const EXECUTION_PORT: u16 = 4000; + +pub const TERMINAL_DIFFICULTY: u64 = 6400; +pub const TERMINAL_BLOCK: u64 = 64; + /// Helper struct to reduce `Arc` usage. pub struct Inner<E: EthSpec> { pub context: RuntimeContext<E>, pub beacon_nodes: RwLock<Vec<LocalBeaconNode<E>>>, pub validator_clients: RwLock<Vec<LocalValidatorClient<E>>>, + pub execution_nodes: RwLock<Vec<LocalExecutionNode<E>>>, } /// Represents a set of interconnected `LocalBeaconNode` and `LocalValidatorClient`. @@ -46,7 +53,7 @@ impl<E: EthSpec> Deref for LocalNetwork<E> { } impl<E: EthSpec> LocalNetwork<E> { - /// Creates a new network with a single `BeaconNode`. + /// Creates a new network with a single `BeaconNode` and a connected `ExecutionNode`. 
pub async fn new( context: RuntimeContext<E>, mut beacon_config: ClientConfig, @@ -56,6 +63,30 @@ impl<E: EthSpec> LocalNetwork<E> { beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT); beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT); beacon_config.network.discv5_config.table_filter = |_| true; + + let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer { + let mock_execution_config = MockExecutionConfig { + server_config: MockServerConfig { + listen_port: EXECUTION_PORT, + ..Default::default() + }, + terminal_block: TERMINAL_BLOCK, + terminal_difficulty: TERMINAL_DIFFICULTY.into(), + ..Default::default() + }; + let execution_node = LocalExecutionNode::new( + context.service_context("boot_node_el".into()), + mock_execution_config, + ); + el_config.default_datadir = execution_node.datadir.path().to_path_buf(); + el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")]; + el_config.execution_endpoints = + vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()]; + vec![execution_node] + } else { + vec![] + }; + let beacon_node = LocalBeaconNode::production(context.service_context("boot_node".into()), beacon_config) .await?; @@ -63,6 +94,7 @@ impl<E: EthSpec> LocalNetwork<E> { inner: Arc::new(Inner { context, beacon_nodes: RwLock::new(vec![beacon_node]), + execution_nodes: RwLock::new(execution_node), validator_clients: RwLock::new(vec![]), }), }) @@ -87,6 +119,7 @@ impl<E: EthSpec> LocalNetwork<E> { /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. 
pub async fn add_beacon_node(&self, mut beacon_config: ClientConfig) -> Result<(), String> { let self_1 = self.clone(); + let count = self.beacon_node_count() as u16; println!("Adding beacon node.."); { let read_lock = self.beacon_nodes.read(); @@ -99,20 +132,38 @@ impl<E: EthSpec> LocalNetwork<E> { .enr() .expect("bootnode must have a network"), ); - let count = self.beacon_node_count() as u16; beacon_config.network.discovery_port = BOOTNODE_PORT + count; beacon_config.network.libp2p_port = BOOTNODE_PORT + count; beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT + count); beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT + count); beacon_config.network.discv5_config.table_filter = |_| true; } + if let Some(el_config) = &mut beacon_config.execution_layer { + let config = MockExecutionConfig { + server_config: MockServerConfig { + listen_port: EXECUTION_PORT + count, + ..Default::default() + }, + terminal_block: TERMINAL_BLOCK, + terminal_difficulty: TERMINAL_DIFFICULTY.into(), + ..Default::default() + }; + let execution_node = LocalExecutionNode::new( + self.context.service_context(format!("node_{}_el", count)), + config, + ); + el_config.default_datadir = execution_node.datadir.path().to_path_buf(); + el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")]; + el_config.execution_endpoints = + vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()]; + self.execution_nodes.write().push(execution_node); + } // We create the beacon node without holding the lock, so that the lock isn't held // across the await. This is only correct if this function never runs in parallel // with itself (which at the time of writing, it does not). 
- let index = self_1.beacon_nodes.read().len(); let beacon_node = LocalBeaconNode::production( - self.context.service_context(format!("node_{}", index)), + self.context.service_context(format!("node_{}", count)), beacon_config, ) .await?; @@ -184,6 +235,16 @@ impl<E: EthSpec> LocalNetwork<E> { .map(|body| body.unwrap().data.finalized.epoch) } + pub fn mine_pow_blocks(&self, block_number: u64) -> Result<(), String> { + let execution_nodes = self.execution_nodes.read(); + for execution_node in execution_nodes.iter() { + let mut block_gen = execution_node.server.ctx.execution_block_generator.write(); + block_gen.insert_pow_block(block_number)?; + println!("Mined pow block {}", block_number); + } + Ok(()) + } + pub async fn duration_to_genesis(&self) -> Duration { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); diff --git a/testing/web3signer_tests/build.rs b/testing/web3signer_tests/build.rs index ac34b5197f..f62dff0b6f 100644 --- a/testing/web3signer_tests/build.rs +++ b/testing/web3signer_tests/build.rs @@ -29,6 +29,8 @@ pub async fn download_binary(dest_dir: PathBuf) { let version = if let Some(version) = FIXED_VERSION_STRING { version.to_string() + } else if let Ok(env_version) = env::var("LIGHTHOUSE_WEB3SIGNER_VERSION") { + env_version } else { // Get the latest release of the web3 signer repo. 
let latest_response: Value = client diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index e39e6515fc..4f9a574f84 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -15,7 +15,7 @@ #[cfg(all(test, unix, not(debug_assertions)))] mod tests { use account_utils::validator_definitions::{ - SigningDefinition, ValidatorDefinition, ValidatorDefinitions, + SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, }; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; @@ -302,6 +302,7 @@ mod tests { let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let config = validator_client::Config::default(); let validator_store = ValidatorStore::<_, E>::new( initialized_validators, @@ -310,6 +311,7 @@ mod tests { spec, None, slot_clock, + &config, executor, log.clone(), ); @@ -358,6 +360,8 @@ mod tests { voting_public_key: validator_pubkey.clone(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::default(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: signer_rig.keystore_path.clone(), @@ -374,14 +378,16 @@ mod tests { voting_public_key: validator_pubkey.clone(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::default(), - signing_definition: SigningDefinition::Web3Signer { + signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url: signer_rig.url.to_string(), root_certificate_path: Some(root_certificate_path()), request_timeout_ms: None, client_identity_path: Some(client_identity_path()), client_identity_password: Some(client_identity_password()), - }, + }), }; ValidatorStoreRig::new(vec![validator_definition], spec).await }; @@ -449,8 +455,6 @@ mod tests { } } - //TODO: remove this once the consensys web3signer includes 
the `validator_registration` method - #[allow(dead_code)] fn get_validator_registration(pubkey: PublicKeyBytes) -> ValidatorRegistrationData { let fee_recipient = Address::repeat_byte(42); ValidatorRegistrationData { @@ -512,16 +516,17 @@ mod tests { .await .unwrap() }) - //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method - // - // .await - // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { - // let val_reg_data = get_validator_registration(pubkey); - // validator_store - // .sign_validator_registration_data(val_reg_data) - // .await - // .unwrap() - // }) + .await + .assert_signatures_match( + "validator_registration", + |pubkey, validator_store| async move { + let val_reg_data = get_validator_registration(pubkey); + validator_store + .sign_validator_registration_data(val_reg_data) + .await + .unwrap() + }, + ) .await; } @@ -598,16 +603,39 @@ mod tests { .unwrap() }, ) - //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method - // - // .await - // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { - // let val_reg_data = get_validator_registration(pubkey); - // validator_store - // .sign_validator_registration_data(val_reg_data) - // .await - // .unwrap() - // }) + .await + .assert_signatures_match( + "validator_registration", + |pubkey, validator_store| async move { + let val_reg_data = get_validator_registration(pubkey); + validator_store + .sign_validator_registration_data(val_reg_data) + .await + .unwrap() + }, + ) + .await; + } + + /// Test all the Merge types. 
+ async fn test_merge_types(network: &str, listen_port: u16) { + let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); + let spec = &network_config.chain_spec::<E>().unwrap(); + let merge_fork_slot = spec + .bellatrix_fork_epoch + .unwrap() + .start_slot(E::slots_per_epoch()); + + TestingRig::new(network, spec.clone(), listen_port) + .await + .assert_signatures_match("beacon_block_merge", |pubkey, validator_store| async move { + let mut merge_block = BeaconBlockMerge::empty(spec); + merge_block.slot = merge_fork_slot; + validator_store + .sign_block(pubkey, BeaconBlock::Merge(merge_block), merge_fork_slot) + .await + .unwrap() + }) .await; } @@ -630,4 +658,19 @@ mod tests { async fn prater_altair_types() { test_altair_types("prater", 4247).await } + + #[tokio::test] + async fn ropsten_base_types() { + test_base_types("ropsten", 4250).await + } + + #[tokio::test] + async fn ropsten_altair_types() { + test_altair_types("ropsten", 4251).await + } + + #[tokio::test] + async fn ropsten_merge_types() { + test_merge_types("ropsten", 4252).await + } } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 2ba81eac7a..d47546eb0d 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -11,9 +11,7 @@ use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; use tokio::sync::mpsc; -use types::{ - BlindedPayload, BlockType, Epoch, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot, -}; +use types::{BlindedPayload, BlockType, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot}; #[derive(Debug)] pub enum BlockError { @@ -44,7 +42,7 @@ pub struct BlockServiceBuilder<T, E: EthSpec> { context: Option<RuntimeContext<E>>, graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, - private_tx_proposals: bool, + strict_fee_recipient: bool, } impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { @@ -56,7 +54,7 @@ impl<T: SlotClock + 'static, 
E: EthSpec> BlockServiceBuilder<T, E> { context: None, graffiti: None, graffiti_file: None, - private_tx_proposals: false, + strict_fee_recipient: false, } } @@ -90,8 +88,8 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { self } - pub fn private_tx_proposals(mut self, private_tx_proposals: bool) -> Self { - self.private_tx_proposals = private_tx_proposals; + pub fn strict_fee_recipient(mut self, strict_fee_recipient: bool) -> Self { + self.strict_fee_recipient = strict_fee_recipient; self } @@ -112,7 +110,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { .ok_or("Cannot build BlockService without runtime_context")?, graffiti: self.graffiti, graffiti_file: self.graffiti_file, - private_tx_proposals: self.private_tx_proposals, + strict_fee_recipient: self.strict_fee_recipient, }), }) } @@ -126,7 +124,7 @@ pub struct Inner<T, E: EthSpec> { context: RuntimeContext<E>, graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, - private_tx_proposals: bool, + strict_fee_recipient: bool, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. 
@@ -235,32 +233,29 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { ) } - let private_tx_proposals = self.private_tx_proposals; - let merge_slot = self - .context - .eth2_config - .spec - .bellatrix_fork_epoch - .unwrap_or_else(Epoch::max_value) - .start_slot(E::slots_per_epoch()); for validator_pubkey in proposers { + let builder_proposals = self + .validator_store + .get_builder_proposals(&validator_pubkey); let service = self.clone(); let log = log.clone(); self.inner.context.executor.spawn( async move { - let publish_result = if private_tx_proposals && slot >= merge_slot { + let publish_result = if builder_proposals { let mut result = service.clone() .publish_block::<BlindedPayload<E>>(slot, validator_pubkey) .await; match result.as_ref() { Err(BlockError::Recoverable(e)) => { - error!(log, "Error whilst producing a blinded block, attempting to publish full block"; "error" => ?e); + error!(log, "Error whilst producing a blinded block, attempting to \ + publish full block"; "error" => ?e); result = service .publish_block::<FullPayload<E>>(slot, validator_pubkey) .await; }, Err(BlockError::Irrecoverable(e)) => { - error!(log, "Error whilst producing a blinded block, cannot fallback because block was signed"; "error" => ?e); + error!(log, "Error whilst producing a blinded block, cannot fallback \ + because the block was signed"; "error" => ?e); }, _ => {}, }; @@ -328,16 +323,19 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { let self_ref = &self; let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; + let fee_recipient = self.validator_store.get_fee_recipient(&validator_pubkey); + + let strict_fee_recipient = self.strict_fee_recipient; // Request block from first responsive beacon node. 
let block = self .beacon_nodes .first_success(RequireSynced::No, |beacon_node| async move { - let get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); let block = match Payload::block_type() { BlockType::Full => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); beacon_node .get_validator_blocks::<E, Payload>( slot, @@ -354,6 +352,10 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { .data } BlockType::Blinded => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); beacon_node .get_validator_blinded_blocks::<E, Payload>( slot, @@ -370,7 +372,17 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { .data } }; - drop(get_timer); + + // Ensure the correctness of the execution payload's fee recipient. + if strict_fee_recipient { + if let Ok(execution_payload) = block.body().execution_payload() { + if Some(execution_payload.fee_recipient()) != fee_recipient { + return Err(BlockError::Recoverable( + "Incorrect fee recipient used by builder".to_string(), + )); + } + } + } if proposer_index != Some(block.proposer_index()) { return Err(BlockError::Recoverable( @@ -392,43 +404,51 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { // Publish block with first available beacon node. 
self.beacon_nodes .first_success(RequireSynced::No, |beacon_node| async { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - match Payload::block_type() { - BlockType::Full => beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })?, - BlockType::Blinded => beacon_node - .post_beacon_blinded_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })?, + BlockType::Full => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? + } + BlockType::Blinded => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blinded_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? 
+ } } - - info!( - log, - "Successfully published block"; - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), - ); Ok::<_, BlockError>(()) }) .await?; + + info!( + log, + "Successfully published block"; + "block_type" => ?Payload::block_type(), + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); Ok(()) } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index d02e26ace0..ceca31aa75 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -136,14 +136,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("FEE-RECIPIENT") .takes_value(true) ) - .arg( - Arg::with_name("suggested-fee-recipient-file") - .long("suggested-fee-recipient-file") - .help("The fallback address provided to the BN if nothing suitable is found \ - in the validator definitions.") - .value_name("FEE-RECIPIENT-FILE") - .takes_value(true) - ) /* REST API related arguments */ .arg( Arg::with_name("http") @@ -259,11 +251,43 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(false), ) .arg( - Arg::with_name("private-tx-proposals") - .long("private-tx-proposals") + Arg::with_name("builder-proposals") + .long("builder-proposals") + .alias("private-tx-proposals") .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ headers during proposals and will sign over headers. 
Useful for outsourcing \ execution payload construction during proposals.") .takes_value(false), ) + .arg( + Arg::with_name("strict-fee-recipient") + .long("strict-fee-recipient") + .help("If this flag is set, Lighthouse will refuse to sign any block whose \ + `fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. \ + This applies to both the normal block proposal flow, as well as block proposals \ + through the builder API. Proposals through the builder API are more likely to have a \ + discrepancy in `fee_recipient` so you should be aware of how your connected relay \ + sends proposer payments before using this flag. If this flag is used, a fee recipient \ + mismatch in the builder API flow will result in a fallback to the local execution engine \ + for payload construction, where a strict fee recipient check will still be applied.") + .takes_value(false), + ) + .arg( + Arg::with_name("builder-registration-timestamp-override") + .long("builder-registration-timestamp-override") + .alias("builder-registration-timestamp-override") + .help("This flag takes a unix timestamp value that will be used to override the \ + timestamp used in the builder api registration") + .takes_value(true), + ) + .arg( + Arg::with_name("gas-limit") + .long("gas-limit") + .value_name("INTEGER") + .takes_value(true) + .help("The gas limit to be used in all builder proposals for all validators managed \ + by this validator client. Note this will not necessarily be used if the gas limit \ + set here moves too far from the previous block's gas limit. 
[default: 30,000,000]") + .requires("builder-proposals"), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index e56e64f5ad..42c91927ca 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,4 +1,3 @@ -use crate::fee_recipient_file::FeeRecipientFile; use crate::graffiti_file::GraffitiFile; use crate::{http_api, http_metrics}; use clap::ArgMatches; @@ -44,8 +43,6 @@ pub struct Config { pub graffiti_file: Option<GraffitiFile>, /// Fallback fallback address. pub fee_recipient: Option<Address>, - /// Fee recipient file to load per validator suggested-fee-recipients. - pub fee_recipient_file: Option<FeeRecipientFile>, /// Configuration for the HTTP REST API. pub http_api: http_api::Config, /// Configuration for the HTTP REST API. @@ -55,10 +52,18 @@ pub struct Config { /// If true, enable functionality that monitors the network for attestations or proposals from /// any of the validators managed by this client before starting up. pub enable_doppelganger_protection: bool, - pub private_tx_proposals: bool, + /// Enable use of the blinded block endpoints during proposals. + pub builder_proposals: bool, + /// Overrides the timestamp field in builder api ValidatorRegistrationV1 + pub builder_registration_timestamp_override: Option<u64>, + /// Fallback gas limit. + pub gas_limit: Option<u64>, /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. pub beacon_nodes_tls_certs: Option<Vec<PathBuf>>, + /// Enabling this will make sure the validator client never signs a block whose `fee_recipient` + /// does not match the `suggested_fee_recipient`. 
+ pub strict_fee_recipient: bool, } impl Default for Config { @@ -86,13 +91,15 @@ impl Default for Config { graffiti: None, graffiti_file: None, fee_recipient: None, - fee_recipient_file: None, http_api: <_>::default(), http_metrics: <_>::default(), monitoring_api: None, enable_doppelganger_protection: false, beacon_nodes_tls_certs: None, - private_tx_proposals: false, + builder_proposals: false, + builder_registration_timestamp_override: None, + gas_limit: None, + strict_fee_recipient: false, } } } @@ -206,19 +213,6 @@ impl Config { } } - if let Some(fee_recipient_file_path) = cli_args.value_of("suggested-fee-recipient-file") { - let mut fee_recipient_file = FeeRecipientFile::new(fee_recipient_file_path.into()); - fee_recipient_file - .read_fee_recipient_file() - .map_err(|e| format!("Error reading suggested-fee-recipient file: {:?}", e))?; - config.fee_recipient_file = Some(fee_recipient_file); - info!( - log, - "Successfully loaded suggested-fee-recipient file"; - "path" => fee_recipient_file_path - ); - } - if let Some(input_fee_recipient) = parse_optional::<Address>(cli_args, "suggested-fee-recipient")? 
{ @@ -313,8 +307,31 @@ impl Config { config.enable_doppelganger_protection = true; } - if cli_args.is_present("private-tx-proposals") { - config.private_tx_proposals = true; + if cli_args.is_present("builder-proposals") { + config.builder_proposals = true; + } + + config.gas_limit = cli_args + .value_of("gas-limit") + .map(|gas_limit| { + gas_limit + .parse::<u64>() + .map_err(|_| "gas-limit is not a valid u64.") + }) + .transpose()?; + + if let Some(registration_timestamp_override) = + cli_args.value_of("builder-registration-timestamp-override") + { + config.builder_registration_timestamp_override = Some( + registration_timestamp_override + .parse::<u64>() + .map_err(|_| "builder-registration-timestamp-override is not a valid u64.")?, + ); + } + + if cli_args.is_present("strict-fee-recipient") { + config.strict_fee_recipient = true; } Ok(config) diff --git a/validator_client/src/fee_recipient_file.rs b/validator_client/src/fee_recipient_file.rs deleted file mode 100644 index 637ca6d3d5..0000000000 --- a/validator_client/src/fee_recipient_file.rs +++ /dev/null @@ -1,184 +0,0 @@ -use serde_derive::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::fs::File; -use std::io::{prelude::*, BufReader}; -use std::path::PathBuf; -use std::str::FromStr; - -use bls::PublicKeyBytes; -use types::Address; - -#[derive(Debug)] -#[allow(clippy::enum_variant_names)] -pub enum Error { - InvalidFile(std::io::Error), - InvalidLine(String), - InvalidPublicKey(String), - InvalidFeeRecipient(String), -} - -/// Struct to load validator fee-recipients from file. -/// The fee-recipient file is expected to have the following structure -/// -/// default: 0x00000000219ab540356cbb839cbe05303d7705fa -/// public_key1: fee-recipient1 -/// public_key2: fee-recipient2 -/// ... 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct FeeRecipientFile { - fee_recipient_path: PathBuf, - fee_recipients: HashMap<PublicKeyBytes, Address>, - default: Option<Address>, -} - -impl FeeRecipientFile { - pub fn new(fee_recipient_path: PathBuf) -> Self { - Self { - fee_recipient_path, - fee_recipients: HashMap::new(), - default: None, - } - } - - /// Returns the fee-recipient corresponding to the given public key if present, else returns the - /// default fee-recipient. - /// - /// Returns an error if loading from the fee-recipient file fails. - pub fn get_fee_recipient(&self, public_key: &PublicKeyBytes) -> Result<Option<Address>, Error> { - Ok(self - .fee_recipients - .get(public_key) - .copied() - .or(self.default)) - } - - /// Loads the fee-recipient file and populates the default fee-recipient and `fee_recipients` hashmap. - /// Returns the fee-recipient corresponding to the given public key if present, else returns the - /// default fee-recipient. - /// - /// Returns an error if loading from the fee-recipient file fails. - pub fn load_fee_recipient( - &mut self, - public_key: &PublicKeyBytes, - ) -> Result<Option<Address>, Error> { - self.read_fee_recipient_file()?; - Ok(self - .fee_recipients - .get(public_key) - .copied() - .or(self.default)) - } - - /// Reads from a fee-recipient file with the specified format and populates the default value - /// and the hashmap. - /// - /// Returns an error if the file does not exist, or if the format is invalid. 
- pub fn read_fee_recipient_file(&mut self) -> Result<(), Error> { - let file = File::open(self.fee_recipient_path.as_path()).map_err(Error::InvalidFile)?; - let reader = BufReader::new(file); - - let lines = reader.lines(); - - self.default = None; - self.fee_recipients.clear(); - - for line in lines { - let line = line.map_err(|e| Error::InvalidLine(e.to_string()))?; - let (pk_opt, fee_recipient) = read_line(&line)?; - match pk_opt { - Some(pk) => { - self.fee_recipients.insert(pk, fee_recipient); - } - None => self.default = Some(fee_recipient), - } - } - Ok(()) - } -} - -/// Parses a line from the fee-recipient file. -/// -/// `Ok((None, fee_recipient))` represents the fee-recipient for the default key. -/// `Ok((Some(pk), fee_recipient))` represents fee-recipient for the public key `pk`. -/// Returns an error if the line is in the wrong format or does not contain a valid public key or fee-recipient. -fn read_line(line: &str) -> Result<(Option<PublicKeyBytes>, Address), Error> { - if let Some(i) = line.find(':') { - let (key, value) = line.split_at(i); - // Note: `value.len() >=1` so `value[1..]` is safe - let fee_recipient = Address::from_str(value[1..].trim()) - .map_err(|e| Error::InvalidFeeRecipient(e.to_string()))?; - if key == "default" { - Ok((None, fee_recipient)) - } else { - let pk = PublicKeyBytes::from_str(key).map_err(Error::InvalidPublicKey)?; - Ok((Some(pk), fee_recipient)) - } - } else { - Err(Error::InvalidLine(format!("Missing delimiter: {}", line))) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use bls::Keypair; - use std::io::LineWriter; - use tempfile::TempDir; - - const DEFAULT_FEE_RECIPIENT: &str = "0x00000000219ab540356cbb839cbe05303d7705fa"; - const CUSTOM_FEE_RECIPIENT1: &str = "0x4242424242424242424242424242424242424242"; - const CUSTOM_FEE_RECIPIENT2: &str = "0x0000000000000000000000000000000000000001"; - const PK1: &str = "0x800012708dc03f611751aad7a43a082142832b5c1aceed07ff9b543cf836381861352aa923c70eeb02018b638aa306aa"; - 
const PK2: &str = "0x80001866ce324de7d80ec73be15e2d064dcf121adf1b34a0d679f2b9ecbab40ce021e03bb877e1a2fe72eaaf475e6e21"; - - // Create a fee-recipient file in the required format and return a path to the file. - fn create_fee_recipient_file() -> PathBuf { - let temp = TempDir::new().unwrap(); - let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); - let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); - - let file_name = temp.into_path().join("fee_recipient.txt"); - - let file = File::create(&file_name).unwrap(); - let mut fee_recipient_file = LineWriter::new(file); - fee_recipient_file - .write_all(format!("default: {}\n", DEFAULT_FEE_RECIPIENT).as_bytes()) - .unwrap(); - fee_recipient_file - .write_all(format!("{}: {}\n", pk1.as_hex_string(), CUSTOM_FEE_RECIPIENT1).as_bytes()) - .unwrap(); - fee_recipient_file - .write_all(format!("{}: {}\n", pk2.as_hex_string(), CUSTOM_FEE_RECIPIENT2).as_bytes()) - .unwrap(); - fee_recipient_file.flush().unwrap(); - file_name - } - - #[test] - fn test_load_fee_recipient() { - let fee_recipient_file_path = create_fee_recipient_file(); - let mut gf = FeeRecipientFile::new(fee_recipient_file_path); - - let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); - let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); - - // Read once - gf.read_fee_recipient_file().unwrap(); - - assert_eq!( - gf.load_fee_recipient(&pk1).unwrap().unwrap(), - Address::from_str(CUSTOM_FEE_RECIPIENT1).unwrap() - ); - assert_eq!( - gf.load_fee_recipient(&pk2).unwrap().unwrap(), - Address::from_str(CUSTOM_FEE_RECIPIENT2).unwrap() - ); - - // Random pk should return the default fee-recipient - let random_pk = Keypair::random().pk.compress(); - assert_eq!( - gf.load_fee_recipient(&random_pk).unwrap().unwrap(), - Address::from_str(DEFAULT_FEE_RECIPIENT).unwrap() - ); - } -} diff --git a/validator_client/src/http_api/create_validator.rs 
b/validator_client/src/http_api/create_validator.rs index db59c25f75..a32ccce627 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -140,6 +140,8 @@ pub async fn create_validators_mnemonic<P: AsRef<Path>, T: 'static + SlotClock, request.enable, request.graffiti.clone(), request.suggested_fee_recipient, + request.gas_limit, + request.builder_proposals, ) .await .map_err(|e| { @@ -154,6 +156,8 @@ pub async fn create_validators_mnemonic<P: AsRef<Path>, T: 'static + SlotClock, description: request.description.clone(), graffiti: request.graffiti.clone(), suggested_fee_recipient: request.suggested_fee_recipient, + gas_limit: request.gas_limit, + builder_proposals: request.builder_proposals, voting_pubkey, eth1_deposit_tx_data: eth2_serde_utils::hex::encode(ð1_deposit_data.rlp), deposit_gwei: request.deposit_gwei, diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index f88aacfca8..29af8d0205 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -205,6 +205,8 @@ fn import_single_keystore<T: SlotClock + 'static, E: EthSpec>( true, None, None, + None, + None, )) .map_err(|e| format!("failed to initialize validator: {:?}", e))?; diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 9ee983a35a..a5d8d0e71c 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -7,12 +7,13 @@ mod tests; use crate::ValidatorStore; use account_utils::{ mnemonic_from_phrase, - validator_definitions::{SigningDefinition, ValidatorDefinition}, + validator_definitions::{SigningDefinition, ValidatorDefinition, Web3SignerDefinition}, }; +pub use api_secret::ApiSecret; use create_validator::{create_validators_mnemonic, create_validators_web3signer}; use eth2::lighthouse_vc::{ - std_types::AuthResponse, - types::{self as api_types, PublicKey, 
PublicKeyBytes}, + std_types::{AuthResponse, GetFeeRecipientResponse}, + types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes}, }; use lighthouse_version::version_with_platform; use serde::{Deserialize, Serialize}; @@ -35,8 +36,6 @@ use warp::{ Filter, }; -pub use api_secret::ApiSecret; - #[derive(Debug)] pub enum Error { Warp(warp::Error), @@ -414,6 +413,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( let voting_password = body.password.clone(); let graffiti = body.graffiti.clone(); let suggested_fee_recipient = body.suggested_fee_recipient; + let gas_limit = body.gas_limit; + let builder_proposals = body.builder_proposals; let validator_def = { if let Some(handle) = task_executor.handle() { @@ -424,6 +425,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( body.enable, graffiti, suggested_fee_recipient, + gas_limit, + builder_proposals, )) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -470,14 +473,19 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( voting_public_key: web3signer.voting_public_key, graffiti: web3signer.graffiti, suggested_fee_recipient: web3signer.suggested_fee_recipient, + gas_limit: web3signer.gas_limit, + builder_proposals: web3signer.builder_proposals, description: web3signer.description, - signing_definition: SigningDefinition::Web3Signer { - url: web3signer.url, - root_certificate_path: web3signer.root_certificate_path, - request_timeout_ms: web3signer.request_timeout_ms, - client_identity_path: web3signer.client_identity_path, - client_identity_password: web3signer.client_identity_password, - }, + signing_definition: SigningDefinition::Web3Signer( + Web3SignerDefinition { + url: web3signer.url, + root_certificate_path: web3signer.root_certificate_path, + request_timeout_ms: web3signer.request_timeout_ms, + client_identity_path: web3signer.client_identity_path, + client_identity_password: web3signer + .client_identity_password, + }, + ), }) .collect(); 
handle.block_on(create_validators_web3signer( @@ -513,18 +521,32 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( let initialized_validators_rw_lock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rw_lock.write(); - match initialized_validators.is_enabled(&validator_pubkey) { - None => Err(warp_utils::reject::custom_not_found(format!( + match ( + initialized_validators.is_enabled(&validator_pubkey), + initialized_validators.validator(&validator_pubkey.compress()), + ) { + (None, _) => Err(warp_utils::reject::custom_not_found(format!( "no validator for {:?}", validator_pubkey ))), - Some(enabled) if enabled == body.enabled => Ok(()), - Some(_) => { + (Some(is_enabled), Some(initialized_validator)) + if Some(is_enabled) == body.enabled + && initialized_validator.get_gas_limit() == body.gas_limit + && initialized_validator.get_builder_proposals() + == body.builder_proposals => + { + Ok(()) + } + (Some(_), _) => { if let Some(handle) = task_executor.handle() { handle .block_on( - initialized_validators - .set_validator_status(&validator_pubkey, body.enabled), + initialized_validators.set_validator_definition_fields( + &validator_pubkey, + body.enabled, + body.gas_limit, + body.builder_proposals, + ), ) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -562,6 +584,123 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end()); let std_remotekeys = eth_v1.and(warp::path("remotekeys")).and(warp::path::end()); + // GET /eth/v1/validator/{pubkey}/feerecipient + let get_fee_recipient = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("feerecipient")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc<ValidatorStore<T, E>>, signer| { + 
blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .get_fee_recipient(&PublicKeyBytes::from(&validator_pubkey)) + .map(|fee_recipient| { + GenericResponse::from(GetFeeRecipientResponse { + pubkey: PublicKeyBytes::from(validator_pubkey.clone()), + ethaddress: fee_recipient, + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_server_error( + "no fee recipient set".to_string(), + ) + }) + }) + }, + ); + + // POST /eth/v1/validator/{pubkey}/feerecipient + let post_fee_recipient = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::body::json()) + .and(warp::path("feerecipient")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, + request: api_types::UpdateFeeRecipientRequest, + validator_store: Arc<ValidatorStore<T, E>>, + signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .set_validator_fee_recipient(&validator_pubkey, request.ethaddress) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting fee recipient: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::ACCEPTED)); + + // DELETE /eth/v1/validator/{pubkey}/feerecipient + let delete_fee_recipient = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("feerecipient")) + .and(warp::path::end()) + 
.and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc<ValidatorStore<T, E>>, signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .delete_validator_fee_recipient(&validator_pubkey) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting fee recipient removal: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -647,6 +786,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) + .or(get_fee_recipient) .or(get_std_keystores) .or(get_std_remotekeys), ) @@ -655,11 +795,16 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .or(post_validators_keystore) .or(post_validators_mnemonic) .or(post_validators_web3signer) + .or(post_fee_recipient) .or(post_std_keystores) .or(post_std_remotekeys), )) .or(warp::patch().and(patch_validators)) - .or(warp::delete().and(delete_std_keystores.or(delete_std_remotekeys))), + .or(warp::delete().and( + delete_fee_recipient + .or(delete_std_keystores) + .or(delete_std_remotekeys), + )), ) // The auth route is the only route that is allowed to be accessed without the API token. .or(warp::get().and(get_auth)) diff --git a/validator_client/src/http_api/remotekeys.rs b/validator_client/src/http_api/remotekeys.rs index 402396d4b4..991dfb8bf7 100644 --- a/validator_client/src/http_api/remotekeys.rs +++ b/validator_client/src/http_api/remotekeys.rs @@ -1,6 +1,8 @@ //! 
Implementation of the standard remotekey management API. use crate::{initialized_validators::Error, InitializedValidators, ValidatorStore}; -use account_utils::validator_definitions::{SigningDefinition, ValidatorDefinition}; +use account_utils::validator_definitions::{ + SigningDefinition, ValidatorDefinition, Web3SignerDefinition, +}; use eth2::lighthouse_vc::std_types::{ DeleteRemotekeyStatus, DeleteRemotekeysRequest, DeleteRemotekeysResponse, ImportRemotekeyStatus, ImportRemotekeysRequest, ImportRemotekeysResponse, @@ -31,11 +33,13 @@ pub fn list<T: SlotClock + 'static, E: EthSpec>( match &def.signing_definition { SigningDefinition::LocalKeystore { .. } => None, - SigningDefinition::Web3Signer { url, .. } => Some(SingleListRemotekeysResponse { - pubkey: validating_pubkey, - url: url.clone(), - readonly: false, - }), + SigningDefinition::Web3Signer(Web3SignerDefinition { url, .. }) => { + Some(SingleListRemotekeysResponse { + pubkey: validating_pubkey, + url: url.clone(), + readonly: false, + }) + } } }) .collect::<Vec<_>>(); @@ -119,14 +123,16 @@ fn import_single_remotekey<T: SlotClock + 'static, E: EthSpec>( voting_public_key: pubkey, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::from("Added by remotekey API"), - signing_definition: SigningDefinition::Web3Signer { + signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url, root_certificate_path: None, request_timeout_ms: None, client_identity_path: None, client_identity_password: None, - }, + }), }; handle .block_on(validator_store.add_validator(web3signer_validator)) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 210555d9c0..e67a82634c 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -36,6 +36,7 @@ use tokio::runtime::Runtime; use tokio::sync::oneshot; const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; +pub const 
TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); type E = MainnetEthSpec; @@ -82,6 +83,7 @@ impl ApiTester { let mut config = Config::default(); config.validator_dir = validator_dir.path().into(); config.secrets_dir = secrets_dir.path().into(); + config.fee_recipient = Some(TEST_DEFAULT_FEE_RECIPIENT); let spec = E::default_spec(); @@ -102,6 +104,7 @@ impl ApiTester { spec, Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock, + &config, executor.clone(), log.clone(), )); @@ -185,7 +188,7 @@ impl ApiTester { missing_token_client.send_authorization_header(false); match func(missing_token_client).await { Err(ApiError::ServerMessage(ApiErrorMessage { - code: 400, message, .. + code: 401, message, .. })) if message.contains("missing Authorization header") => (), Err(other) => panic!("expected missing header error, got {:?}", other), Ok(_) => panic!("expected missing header error, got Ok"), @@ -268,6 +271,8 @@ impl ApiTester { description: format!("boi #{}", i), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, deposit_gwei: E::default_spec().max_effective_balance, }) .collect::<Vec<_>>(); @@ -399,6 +404,8 @@ impl ApiTester { keystore, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, }; self.client @@ -417,6 +424,8 @@ impl ApiTester { keystore, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, }; let response = self @@ -453,6 +462,8 @@ impl ApiTester { description: format!("{}", i), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: kp.pk, url: format!("http://signer_{}.com/", i), root_certificate_path: None, @@ -482,7 +493,7 @@ impl ApiTester { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, enabled) + 
.patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None) .await .unwrap(); @@ -519,6 +530,56 @@ impl ApiTester { self } + + pub async fn set_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators(&validator.voting_pubkey, None, Some(gas_limit), None) + .await + .unwrap(); + + self + } + + pub async fn assert_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store.get_gas_limit(&validator.voting_pubkey), + gas_limit + ); + + self + } + + pub async fn set_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + Some(builder_proposals), + ) + .await + .unwrap(); + + self + } + + pub async fn assert_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store + .get_builder_proposals(&validator.voting_pubkey), + builder_proposals + ); + + self + } } struct HdValidatorScenario { @@ -581,6 +642,8 @@ fn routes_with_invalid_auth() { description: <_>::default(), graffiti: <_>::default(), suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), deposit_gwei: <_>::default(), }]) .await @@ -610,13 +673,15 @@ fn routes_with_invalid_auth() { keystore, graffiti: <_>::default(), suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), }) .await }) .await .test_with_invalid_auth(|client| async move { client - .patch_lighthouse_validators(&PublicKeyBytes::empty(), false) + 
.patch_lighthouse_validators(&PublicKeyBytes::empty(), Some(false), None, None) .await }) .await @@ -733,6 +798,74 @@ fn validator_enabling() { }); } +#[test] +fn validator_gas_limit() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_gas_limit(0, 500) + .await + .assert_gas_limit(0, 500) + .await + // Update gas limit while validator is disabled. + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_gas_limit(0, 1000) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_gas_limit(0, 1000) + .await + }); +} + +#[test] +fn validator_builder_proposals() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_builder_proposals(0, true) + .await + // Test setting builder proposals while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_builder_proposals(0, false) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_builder_proposals(0, false) + .await + }); +} + #[test] fn keystore_validator_creation() { let runtime = build_runtime(); diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index a381378ffe..c3b5f0bb90 100644 --- 
a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -1,5 +1,7 @@ use super::*; use account_utils::random_password_string; +use bls::PublicKeyBytes; +use eth2::lighthouse_vc::types::UpdateFeeRecipientRequest; use eth2::lighthouse_vc::{ http_client::ValidatorClientHttpClient as HttpClient, std_types::{KeystoreJsonStr as Keystore, *}, @@ -9,6 +11,7 @@ use itertools::Itertools; use rand::{rngs::SmallRng, Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; use std::{collections::HashMap, path::Path}; +use types::Address; fn new_keystore(password: ZeroizeString) -> Keystore { let keypair = Keypair::random(); @@ -36,6 +39,8 @@ fn web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorReq description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: pubkey, url: web3_signer_url(), root_certificate_path: None, @@ -462,7 +467,7 @@ fn import_and_delete_conflicting_web3_signer_keystores() { for pubkey in &pubkeys { tester .client - .patch_lighthouse_validators(pubkey, false) + .patch_lighthouse_validators(pubkey, Some(false), None, None) .await .unwrap(); } @@ -585,6 +590,185 @@ fn import_invalid_slashing_protection() { }) } +#[test] +fn check_get_set_fee_recipient() { + run_test(|tester: ApiTester| async move { + let _ = &tester; + let password = random_password_string(); + let keystores = (0..3) + .map(|_| new_keystore(password.clone())) + .collect::<Vec<_>>(); + let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::<Vec<_>>(); + + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. 
+ check_keystore_import_response(&import_res, all_imported(keystores.len())); + + // Check that GET lists all the imported keystores. + let get_res = tester.client.get_keystores().await.unwrap(); + check_keystore_get_response(&get_res, &keystores); + + // Before setting anything, every fee recipient should be set to TEST_DEFAULT_FEE_RECIPIENT + for pubkey in &all_pubkeys { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: TEST_DEFAULT_FEE_RECIPIENT, + } + ); + } + + use std::str::FromStr; + let fee_recipient_public_key_1 = + Address::from_str("0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b").unwrap(); + let fee_recipient_public_key_2 = + Address::from_str("0x0000000000000000000000000000000000000001").unwrap(); + let fee_recipient_override = + Address::from_str("0x0123456789abcdef0123456789abcdef01234567").unwrap(); + + // set the fee recipient for pubkey[1] using the API + tester + .client + .post_fee_recipient( + &all_pubkeys[1], + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient_public_key_1.clone(), + }, + ) + .await + .expect("should update fee recipient"); + // now everything but pubkey[1] should be TEST_DEFAULT_FEE_RECIPIENT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 1 { + fee_recipient_public_key_1.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + + // set the fee recipient for pubkey[2] using the API + tester + .client + .post_fee_recipient( + &all_pubkeys[2], + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient_public_key_2.clone(), + }, + ) + .await + .expect("should update fee recipient"); + // now everything but pubkey[1] & 
pubkey[2] should be fee_recipient_file_default + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 1 { + fee_recipient_public_key_1.clone() + } else if i == 2 { + fee_recipient_public_key_2.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + + // should be able to override previous fee_recipient + tester + .client + .post_fee_recipient( + &all_pubkeys[1], + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient_override.clone(), + }, + ) + .await + .expect("should update fee recipient"); + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 1 { + fee_recipient_override.clone() + } else if i == 2 { + fee_recipient_public_key_2.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + + // delete fee recipient for pubkey[1] using the API + tester + .client + .delete_fee_recipient(&all_pubkeys[1]) + .await + .expect("should delete fee recipient"); + // now everything but pubkey[2] should be TEST_DEFAULT_FEE_RECIPIENT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 2 { + fee_recipient_public_key_2.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + }) +} + fn all_indices(count: usize) -> Vec<usize> { (0..count).collect() } diff --git a/validator_client/src/http_metrics/metrics.rs 
b/validator_client/src/http_metrics/metrics.rs index 836aab4c1f..146d008a57 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -11,7 +11,9 @@ pub const UNREGISTERED: &str = "unregistered"; pub const FULL_UPDATE: &str = "full_update"; pub const BEACON_BLOCK: &str = "beacon_block"; pub const BEACON_BLOCK_HTTP_GET: &str = "beacon_block_http_get"; +pub const BLINDED_BEACON_BLOCK_HTTP_GET: &str = "blinded_beacon_block_http_get"; pub const BEACON_BLOCK_HTTP_POST: &str = "beacon_block_http_post"; +pub const BLINDED_BEACON_BLOCK_HTTP_POST: &str = "blinded_beacon_block_http_post"; pub const ATTESTATIONS: &str = "attestations"; pub const ATTESTATIONS_HTTP_GET: &str = "attestations_http_get"; pub const ATTESTATIONS_HTTP_POST: &str = "attestations_http_post"; diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 0d5d4ad76e..66a621eb77 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -10,7 +10,8 @@ use crate::signing_method::SigningMethod; use account_utils::{ read_password, read_password_from_user, validator_definitions::{ - self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, CONFIG_FILENAME, + self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, + CONFIG_FILENAME, }, ZeroizeString, }; @@ -109,6 +110,8 @@ pub struct InitializedValidator { signing_method: Arc<SigningMethod>, graffiti: Option<Graffiti>, suggested_fee_recipient: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, /// The validators index in `state.validators`, to be updated by an external service. index: Option<u64>, } @@ -128,6 +131,22 @@ impl InitializedValidator { SigningMethod::Web3Signer { .. 
} => None, } } + + pub fn get_suggested_fee_recipient(&self) -> Option<Address> { + self.suggested_fee_recipient + } + + pub fn get_gas_limit(&self) -> Option<u64> { + self.gas_limit + } + + pub fn get_builder_proposals(&self) -> Option<bool> { + self.builder_proposals + } + + pub fn get_index(&self) -> Option<u64> { + self.index + } } fn open_keystore(path: &Path) -> Result<Keystore, Error> { @@ -155,6 +174,7 @@ impl InitializedValidator { def: ValidatorDefinition, key_cache: &mut KeyCache, key_stores: &mut HashMap<PathBuf, Keystore>, + web3_signer_client_map: &mut Option<HashMap<Web3SignerDefinition, Client>>, ) -> Result<Self, Error> { if !def.enabled { return Err(Error::UnableToInitializeDisabledValidator); @@ -239,46 +259,45 @@ impl InitializedValidator { voting_keypair: Arc::new(voting_keypair), } } - SigningDefinition::Web3Signer { - url, - root_certificate_path, - request_timeout_ms, - client_identity_path, - client_identity_password, - } => { - let signing_url = build_web3_signer_url(&url, &def.voting_public_key) + SigningDefinition::Web3Signer(web3_signer) => { + let signing_url = build_web3_signer_url(&web3_signer.url, &def.voting_public_key) .map_err(|e| Error::InvalidWeb3SignerUrl(e.to_string()))?; - let request_timeout = request_timeout_ms + + let request_timeout = web3_signer + .request_timeout_ms .map(Duration::from_millis) .unwrap_or(DEFAULT_REMOTE_SIGNER_REQUEST_TIMEOUT); - let builder = Client::builder().timeout(request_timeout); - - let builder = if let Some(path) = root_certificate_path { - let certificate = load_pem_certificate(path)?; - builder.add_root_certificate(certificate) - } else { - builder - }; - - let builder = if let Some(path) = client_identity_path { - let identity = load_pkcs12_identity( - path, - &client_identity_password - .ok_or(Error::MissingWeb3SignerClientIdentityPassword)?, - )?; - builder.identity(identity) - } else { - if client_identity_password.is_some() { - return 
Err(Error::MissingWeb3SignerClientIdentityCertificateFile); + // Check if a client has already been initialized for this remote signer url. + let http_client = if let Some(client_map) = web3_signer_client_map { + match client_map.get(&web3_signer) { + Some(client) => client.clone(), + None => { + let client = build_web3_signer_client( + web3_signer.root_certificate_path.clone(), + web3_signer.client_identity_path.clone(), + web3_signer.client_identity_password.clone(), + request_timeout, + )?; + client_map.insert(web3_signer, client.clone()); + client + } } - builder + } else { + // There are no clients in the map. + let mut new_web3_signer_client_map: HashMap<Web3SignerDefinition, Client> = + HashMap::new(); + let client = build_web3_signer_client( + web3_signer.root_certificate_path.clone(), + web3_signer.client_identity_path.clone(), + web3_signer.client_identity_password.clone(), + request_timeout, + )?; + new_web3_signer_client_map.insert(web3_signer, client.clone()); + *web3_signer_client_map = Some(new_web3_signer_client_map); + client }; - let http_client = builder - .build() - .map_err(Error::UnableToBuildWeb3SignerClient)?; - SigningMethod::Web3Signer { signing_url, http_client, @@ -291,6 +310,8 @@ impl InitializedValidator { signing_method: Arc::new(signing_method), graffiti: def.graffiti.map(Into::into), suggested_fee_recipient: def.suggested_fee_recipient, + gas_limit: def.gas_limit, + builder_proposals: def.builder_proposals, index: None, }) } @@ -332,6 +353,39 @@ fn build_web3_signer_url(base_url: &str, voting_public_key: &PublicKey) -> Resul Url::parse(base_url)?.join(&format!("api/v1/eth2/sign/{}", voting_public_key)) } +fn build_web3_signer_client( + root_certificate_path: Option<PathBuf>, + client_identity_path: Option<PathBuf>, + client_identity_password: Option<String>, + request_timeout: Duration, +) -> Result<Client, Error> { + let builder = Client::builder().timeout(request_timeout); + + let builder = if let Some(path) = 
root_certificate_path { + let certificate = load_pem_certificate(path)?; + builder.add_root_certificate(certificate) + } else { + builder + }; + + let builder = if let Some(path) = client_identity_path { + let identity = load_pkcs12_identity( + path, + &client_identity_password.ok_or(Error::MissingWeb3SignerClientIdentityPassword)?, + )?; + builder.identity(identity) + } else { + if client_identity_password.is_some() { + return Err(Error::MissingWeb3SignerClientIdentityCertificateFile); + } + builder + }; + + builder + .build() + .map_err(Error::UnableToBuildWeb3SignerClient) +} + /// Try to unlock `keystore` at `keystore_path` by prompting the user via `stdin`. fn unlock_keystore_via_stdin_password( keystore: &Keystore, @@ -382,6 +436,8 @@ pub struct InitializedValidators { validators_dir: PathBuf, /// The canonical set of validators. validators: HashMap<PublicKeyBytes, InitializedValidator>, + /// The clients used for communications with a remote signer. + web3_signer_client_map: Option<HashMap<Web3SignerDefinition, Client>>, /// For logging via `slog`. log: Logger, } @@ -397,6 +453,7 @@ impl InitializedValidators { validators_dir, definitions, validators: HashMap::default(), + web3_signer_client_map: None, log, }; this.update_validators().await?; @@ -585,7 +642,28 @@ impl InitializedValidators { .and_then(|v| v.suggested_fee_recipient) } - /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled` values. + /// Returns the `gas_limit` for a given public key specified in the + /// `ValidatorDefinitions`. + pub fn gas_limit(&self, public_key: &PublicKeyBytes) -> Option<u64> { + self.validators.get(public_key).and_then(|v| v.gas_limit) + } + + /// Returns the `builder_proposals` for a given public key specified in the + /// `ValidatorDefinitions`. 
+ pub fn builder_proposals(&self, public_key: &PublicKeyBytes) -> Option<bool> { + self.validators + .get(public_key) + .and_then(|v| v.builder_proposals) + } + + /// Returns an `Option` of a reference to an `InitializedValidator` for a given public key specified in the + /// `ValidatorDefinitions`. + pub fn validator(&self, public_key: &PublicKeyBytes) -> Option<&InitializedValidator> { + self.validators.get(public_key) + } + + /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`, and `builder_proposals` + /// values. /// /// ## Notes /// @@ -593,11 +671,17 @@ impl InitializedValidators { /// disk. A newly enabled validator will be added to `self.validators`, whilst a newly disabled /// validator will be removed from `self.validators`. /// + /// If a `gas_limit` is included in the call to this function, it will also be updated and saved + /// to disk. If `gas_limit` is `None` the `gas_limit` *will not* be unset in `ValidatorDefinition` + /// or `InitializedValidator`. The same logic applies to `builder_proposals`. + /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. - pub async fn set_validator_status( + pub async fn set_validator_definition_fields( &mut self, voting_public_key: &PublicKey, - enabled: bool, + enabled: Option<bool>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, ) -> Result<(), Error> { if let Some(def) = self .definitions @@ -605,11 +689,105 @@ impl InitializedValidators { .iter_mut() .find(|def| def.voting_public_key == *voting_public_key) { - def.enabled = enabled; + // Don't overwrite fields if they are not set in this request. 
+ if let Some(enabled) = enabled { + def.enabled = enabled; + } + if let Some(gas_limit) = gas_limit { + def.gas_limit = Some(gas_limit); + } + if let Some(builder_proposals) = builder_proposals { + def.builder_proposals = Some(builder_proposals); + } } self.update_validators().await?; + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + // Don't overwrite fields if they are not set in this request. + if let Some(gas_limit) = gas_limit { + val.gas_limit = Some(gas_limit); + } + if let Some(builder_proposals) = builder_proposals { + val.builder_proposals = Some(builder_proposals); + } + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + + /// Sets the `InitializedValidator` and `ValidatorDefinition` `suggested_fee_recipient` values. + /// + /// ## Notes + /// + /// Setting a validator `fee_recipient` will cause `self.definitions` to be updated and saved to + /// disk. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + pub fn set_validator_fee_recipient( + &mut self, + voting_public_key: &PublicKey, + fee_recipient: Address, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.suggested_fee_recipient = Some(fee_recipient); + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.suggested_fee_recipient = Some(fee_recipient); + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + + /// Removes the `InitializedValidator` and `ValidatorDefinition` `suggested_fee_recipient` values. + /// + /// ## Notes + /// + /// Removing a validator `fee_recipient` will cause `self.definitions` to be updated and saved to + /// disk. 
The fee_recipient for the validator will then fall back to the process level default if + /// it is set. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + pub fn delete_validator_fee_recipient( + &mut self, + voting_public_key: &PublicKey, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.suggested_fee_recipient = None; + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.suggested_fee_recipient = None; + } + self.definitions .save(&self.validators_dir) .map_err(Error::UnableToSaveDefinitions)?; @@ -754,6 +932,7 @@ impl InitializedValidators { def.clone(), &mut key_cache, &mut key_stores, + &mut None, ) .await { @@ -798,11 +977,12 @@ impl InitializedValidators { } } } - SigningDefinition::Web3Signer { .. } => { + SigningDefinition::Web3Signer(Web3SignerDefinition { .. }) => { match InitializedValidator::from_definition( def.clone(), &mut key_cache, &mut key_stores, + &mut self.web3_signer_client_map, ) .await { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 5e45847598..bb7b296d23 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -5,7 +5,6 @@ mod check_synced; mod cli; mod config; mod duties_service; -mod fee_recipient_file; mod graffiti_file; mod http_metrics; mod key_cache; @@ -73,6 +72,7 @@ const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -281,6 +281,8 @@ impl<T: EthSpec> ProductionValidatorClient<T> { liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, proposal: 
slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, + sync_committee_contribution: slot_duration + / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, } } else { @@ -360,6 +362,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { context.eth2_config.spec.clone(), doppelganger_service.clone(), slot_clock.clone(), + &config, context.executor.clone(), log.clone(), )); @@ -410,7 +413,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) - .private_tx_proposals(config.private_tx_proposals) + .strict_fee_recipient(config.strict_fee_recipient) .build()?; let attestation_service = AttestationServiceBuilder::new() @@ -426,8 +429,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { .validator_store(validator_store.clone()) .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("preparation".into())) - .fee_recipient(config.fee_recipient) - .fee_recipient_file(config.fee_recipient_file.clone()) + .builder_registration_timestamp_override(config.builder_registration_timestamp_override) .build()?; let sync_committee_service = SyncCommitteeService::new( @@ -438,7 +440,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { context.service_context("sync_committee".into()), ); - // Wait until genesis has occured. + // Wait until genesis has occurred. // // It seems most sensible to move this into the `start_service` function, but I'm caution // of making too many changes this close to genesis (<1 week). 
@@ -485,10 +487,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { self.preparation_service .clone() - .start_update_service( - self.config.private_tx_proposals, - &self.context.eth2_config.spec, - ) + .start_update_service(&self.context.eth2_config.spec) .map_err(|e| format!("Unable to start preparation service: {}", e))?; if let Some(doppelganger_service) = self.doppelganger_service.clone() { diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 34201180c0..b138d3e4ee 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -1,8 +1,5 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::{ - fee_recipient_file::FeeRecipientFile, - validator_store::{DoppelgangerStatus, ValidatorStore}, -}; +use crate::validator_store::{DoppelgangerStatus, ValidatorStore}; use bls::PublicKeyBytes; use environment::RuntimeContext; use parking_lot::RwLock; @@ -25,14 +22,16 @@ const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; /// Number of epochs to wait before re-submitting validator registration. const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1; +/// The number of validator registrations to include per request to the beacon node. +const VALIDATOR_REGISTRATION_BATCH_SIZE: usize = 500; + /// Builds an `PreparationService`. 
pub struct PreparationServiceBuilder<T: SlotClock + 'static, E: EthSpec> { validator_store: Option<Arc<ValidatorStore<T, E>>>, slot_clock: Option<T>, beacon_nodes: Option<Arc<BeaconNodeFallback<T, E>>>, context: Option<RuntimeContext<E>>, - fee_recipient: Option<Address>, - fee_recipient_file: Option<FeeRecipientFile>, + builder_registration_timestamp_override: Option<u64>, } impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { @@ -42,8 +41,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { slot_clock: None, beacon_nodes: None, context: None, - fee_recipient: None, - fee_recipient_file: None, + builder_registration_timestamp_override: None, } } @@ -67,13 +65,11 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { self } - pub fn fee_recipient(mut self, fee_recipient: Option<Address>) -> Self { - self.fee_recipient = fee_recipient; - self - } - - pub fn fee_recipient_file(mut self, fee_recipient_file: Option<FeeRecipientFile>) -> Self { - self.fee_recipient_file = fee_recipient_file; + pub fn builder_registration_timestamp_override( + mut self, + builder_registration_timestamp_override: Option<u64>, + ) -> Self { + self.builder_registration_timestamp_override = builder_registration_timestamp_override; self } @@ -92,8 +88,8 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { context: self .context .ok_or("Cannot build PreparationService without runtime_context")?, - fee_recipient: self.fee_recipient, - fee_recipient_file: self.fee_recipient_file, + builder_registration_timestamp_override: self + .builder_registration_timestamp_override, validator_registration_cache: RwLock::new(HashMap::new()), }), }) @@ -106,8 +102,7 @@ pub struct Inner<T, E: EthSpec> { slot_clock: T, beacon_nodes: Arc<BeaconNodeFallback<T, E>>, context: RuntimeContext<E>, - fee_recipient: Option<Address>, - fee_recipient_file: Option<FeeRecipientFile>, + builder_registration_timestamp_override: 
Option<u64>, // Used to track unpublished validator registration changes. validator_registration_cache: RwLock<HashMap<ValidatorRegistrationKey, SignedValidatorRegistrationData>>, @@ -158,14 +153,8 @@ impl<T, E: EthSpec> Deref for PreparationService<T, E> { } impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { - pub fn start_update_service( - self, - start_registration_service: bool, - spec: &ChainSpec, - ) -> Result<(), String> { - if start_registration_service { - self.clone().start_validator_registration_service(spec)?; - } + pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> { + self.clone().start_validator_registration_service(spec)?; self.start_proposer_prepare_service(spec) } @@ -229,7 +218,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { let validator_registration_fut = async move { loop { // Poll the endpoint immediately to ensure fee recipients are received. - if let Err(e) = self.register_validators(&spec).await { + if let Err(e) = self.register_validators().await { error!(log,"Error during validator registration";"error" => ?e); } @@ -272,52 +261,48 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { } fn collect_preparation_data(&self, spec: &ChainSpec) -> Vec<ProposerPreparationData> { - self.collect_data(spec, |_, validator_index, fee_recipient| { - ProposerPreparationData { - validator_index, - fee_recipient, - } - }) - } - - fn collect_validator_registration_keys( - &self, - spec: &ChainSpec, - ) -> Vec<ValidatorRegistrationKey> { - self.collect_data(spec, |pubkey, _, fee_recipient| { - ValidatorRegistrationKey { - fee_recipient, - //TODO(sean) this is geth's default, we should make this configurable and maybe have the default be dynamic. 
- // Discussion here: https://github.com/ethereum/builder-specs/issues/17 - gas_limit: 30_000_000, - pubkey, - } - }) - } - - fn collect_data<G, U>(&self, spec: &ChainSpec, map_fn: G) -> Vec<U> - where - G: Fn(PublicKeyBytes, u64, Address) -> U, - { let log = self.context.log(); + self.collect_proposal_data(|pubkey, proposal_data| { + if let Some(fee_recipient) = proposal_data.fee_recipient { + Some(ProposerPreparationData { + // Ignore fee recipients for keys without indices, they are inactive. + validator_index: proposal_data.validator_index?, + fee_recipient, + }) + } else { + if spec.bellatrix_fork_epoch.is_some() { + error!( + log, + "Validator is missing fee recipient"; + "msg" => "update validator_definitions.yml", + "pubkey" => ?pubkey + ); + } + None + } + }) + } - let fee_recipient_file = self - .fee_recipient_file - .clone() - .map(|mut fee_recipient_file| { - fee_recipient_file - .read_fee_recipient_file() - .map_err(|e| { - error!( - log, - "Error loading fee-recipient file"; - "error" => ?e - ); + fn collect_validator_registration_keys(&self) -> Vec<ValidatorRegistrationKey> { + self.collect_proposal_data(|pubkey, proposal_data| { + // We don't log for missing fee recipients here because this will be logged more + // frequently in `collect_preparation_data`. + proposal_data.fee_recipient.and_then(|fee_recipient| { + proposal_data + .builder_proposals + .then(|| ValidatorRegistrationKey { + fee_recipient, + gas_limit: proposal_data.gas_limit, + pubkey, }) - .unwrap_or(()); - fee_recipient_file - }); + }) + }) + } + fn collect_proposal_data<G, U>(&self, map_fn: G) -> Vec<U> + where + G: Fn(PublicKeyBytes, ProposalData) -> Option<U>, + { let all_pubkeys: Vec<_> = self .validator_store .voting_pubkeys(DoppelgangerStatus::ignored); @@ -325,38 +310,8 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { all_pubkeys .into_iter() .filter_map(|pubkey| { - // Ignore fee recipients for keys without indices, they are inactive. 
- let validator_index = self.validator_store.validator_index(&pubkey)?; - - // If there is a `suggested_fee_recipient` in the validator definitions yaml - // file, use that value. - let fee_recipient = self - .validator_store - .suggested_fee_recipient(&pubkey) - .or_else(|| { - // If there's nothing in the validator defs file, check the fee - // recipient file. - fee_recipient_file - .as_ref()? - .get_fee_recipient(&pubkey) - .ok()? - }) - // If there's nothing in the file, try the process-level default value. - .or(self.fee_recipient); - - if let Some(fee_recipient) = fee_recipient { - Some(map_fn(pubkey, validator_index, fee_recipient)) - } else { - if spec.bellatrix_fork_epoch.is_some() { - error!( - log, - "Validator is missing fee recipient"; - "msg" => "update validator_definitions.yml", - "pubkey" => ?pubkey - ); - } - None - } + let proposal_data = self.validator_store.proposal_data(&pubkey)?; + map_fn(pubkey, proposal_data) }) .collect() } @@ -394,8 +349,8 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { } /// Register validators with builders, used in the blinded block proposal flow. - async fn register_validators(&self, spec: &ChainSpec) -> Result<(), String> { - let registration_keys = self.collect_validator_registration_keys(spec); + async fn register_validators(&self) -> Result<(), String> { + let registration_keys = self.collect_validator_registration_keys(); let mut changed_keys = vec![]; @@ -441,10 +396,15 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { let signed_data = if let Some(signed_data) = cached_registration_opt { signed_data } else { - let timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("{e:?}"))? - .as_secs(); + let timestamp = + if let Some(timestamp) = self.builder_registration_timestamp_override { + timestamp + } else { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("{e:?}"))? 
+ .as_secs() + }; let ValidatorRegistrationKey { fee_recipient, @@ -479,29 +439,35 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { } if !signed.is_empty() { - let signed_ref = signed.as_slice(); - - match self - .beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { - beacon_node - .post_validator_register_validator(signed_ref) - .await - }) - .await - { - Ok(()) => debug!( - log, - "Published validator registration"; - "count" => registration_data_len, - ), - Err(e) => error!( - log, - "Unable to publish validator registration"; - "error" => %e, - ), + for batch in signed.chunks(VALIDATOR_REGISTRATION_BATCH_SIZE) { + match self + .beacon_nodes + .first_success(RequireSynced::Yes, |beacon_node| async move { + beacon_node.post_validator_register_validator(batch).await + }) + .await + { + Ok(()) => info!( + log, + "Published validator registrations to the builder network"; + "count" => registration_data_len, + ), + Err(e) => error!( + log, + "Unable to publish validator registrations to the builder network"; + "error" => %e, + ), + } } } Ok(()) } } + +/// A helper struct, used for passing data from the validator store to services. 
+pub struct ProposalData { + pub(crate) validator_index: Option<u64>, + pub(crate) fee_recipient: Option<Address>, + pub(crate) gas_limit: u64, + pub(crate) builder_proposals: bool, +} diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 0ab37484ba..cf02ae0c32 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -25,6 +25,7 @@ pub enum MessageType { pub enum ForkName { Phase0, Altair, + Bellatrix, } #[derive(Debug, PartialEq, Serialize)] @@ -43,7 +44,10 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload<T>> { Attestation(&'a AttestationData), BeaconBlock { version: ForkName, - block: &'a BeaconBlock<T, Payload>, + #[serde(skip_serializing_if = "Option::is_none")] + block: Option<&'a BeaconBlock<T, Payload>>, + #[serde(skip_serializing_if = "Option::is_none")] + block_header: Option<BeaconBlockHeader>, }, #[allow(dead_code)] Deposit { @@ -70,13 +74,23 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload<T>> { impl<'a, T: EthSpec, Payload: ExecPayload<T>> Web3SignerObject<'a, T, Payload> { pub fn beacon_block(block: &'a BeaconBlock<T, Payload>) -> Result<Self, Error> { - let version = match block { - BeaconBlock::Base(_) => ForkName::Phase0, - BeaconBlock::Altair(_) => ForkName::Altair, - BeaconBlock::Merge(_) => return Err(Error::MergeForkNotSupported), - }; - - Ok(Web3SignerObject::BeaconBlock { version, block }) + match block { + BeaconBlock::Base(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Phase0, + block: Some(block), + block_header: None, + }), + BeaconBlock::Altair(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Altair, + block: Some(block), + block_header: None, + }), + BeaconBlock::Merge(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Bellatrix, + block: None, + block_header: Some(block.block_header()), + }), + } } pub fn message_type(&self) -> 
MessageType { diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index 105bf7d27f..73d0066f20 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -4,7 +4,7 @@ use environment::RuntimeContext; use eth2::types::BlockId; use futures::future::join_all; use futures::future::FutureExt; -use slog::{crit, debug, error, info, trace}; +use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; @@ -174,17 +174,39 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { return Ok(()); } - // Fetch block root for `SyncCommitteeContribution`. - let block_root = self + // Fetch `block_root` and `execution_optimistic` for `SyncCommitteeContribution`. + let response = self .beacon_nodes .first_success(RequireSynced::Yes, |beacon_node| async move { beacon_node.get_beacon_blocks_root(BlockId::Head).await }) .await .map_err(|e| e.to_string())? - .ok_or_else(|| format!("No block root found for slot {}", slot))? - .data - .root; + .ok_or_else(|| format!("No block root found for slot {}", slot))?; + + let block_root = response.data.root; + if let Some(execution_optimistic) = response.execution_optimistic { + if execution_optimistic { + warn!( + log, + "Refusing to sign sync committee messages for optimistic head block"; + "slot" => slot, + ); + return Ok(()); + } + } else if let Some(bellatrix_fork_epoch) = self.duties_service.spec.bellatrix_fork_epoch { + // If the slot is post Bellatrix, do not sign messages when we cannot verify the + // optimistic status of the head block. + if slot.epoch(E::slots_per_epoch()) > bellatrix_fork_epoch { + warn!( + log, + "Refusing to sign sync committee messages for a head block with an unknown \ + optimistic status"; + "slot" => slot, + ); + return Ok(()); + } + } // Spawn one task to publish all of the sync committee signatures. 
let validator_duties = slot_duties.duties; diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 36ec5e8955..f883d0201f 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -3,6 +3,7 @@ use crate::{ http_metrics::metrics, initialized_validators::InitializedValidators, signing_method::{Error as SigningError, SignableMessage, SigningContext, SigningMethod}, + Config, }; use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString}; use parking_lot::{Mutex, RwLock}; @@ -27,6 +28,7 @@ use types::{ use validator_dir::ValidatorDir; pub use crate::doppelganger_service::DoppelgangerStatus; +use crate::preparation_service::ProposalData; #[derive(Debug, PartialEq)] pub enum Error { @@ -52,6 +54,11 @@ impl From<SigningError> for Error { /// This acts as a maximum safe-guard against clock drift. const SLASHING_PROTECTION_HISTORY_EPOCHS: u64 = 512; +/// Currently used as the default gas limit in execution clients. 
+/// +/// https://github.com/ethereum/builder-specs/issues/17 +const DEFAULT_GAS_LIMIT: u64 = 30_000_000; + struct LocalValidator { validator_dir: ValidatorDir, voting_keypair: Keypair, @@ -86,6 +93,9 @@ pub struct ValidatorStore<T, E: EthSpec> { log: Logger, doppelganger_service: Option<Arc<DoppelgangerService>>, slot_clock: T, + fee_recipient_process: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: bool, task_executor: TaskExecutor, _phantom: PhantomData<E>, } @@ -101,6 +111,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { spec: ChainSpec, doppelganger_service: Option<Arc<DoppelgangerService>>, slot_clock: T, + config: &Config, task_executor: TaskExecutor, log: Logger, ) -> Self { @@ -113,6 +124,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { log, doppelganger_service, slot_clock, + fee_recipient_process: config.fee_recipient, + gas_limit: config.gas_limit, + builder_proposals: config.builder_proposals, task_executor, _phantom: PhantomData, } @@ -143,6 +157,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { /// Insert a new validator to `self`, where the validator is represented by an EIP-2335 /// keystore on the filesystem. 
+ #[allow(clippy::too_many_arguments)] pub async fn add_validator_keystore<P: AsRef<Path>>( &self, voting_keystore_path: P, @@ -150,12 +165,16 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { enable: bool, graffiti: Option<GraffitiString>, suggested_fee_recipient: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, ) -> Result<ValidatorDefinition, String> { let mut validator_def = ValidatorDefinition::new_keystore_with_password( voting_keystore_path, Some(password), graffiti.map(Into::into), suggested_fee_recipient, + gas_limit, + builder_proposals, ) .map_err(|e| format!("failed to create validator definitions: {:?}", e))?; @@ -197,6 +216,23 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { Ok(validator_def) } + /// Returns `ProposalData` for the provided `pubkey` if it exists in `InitializedValidators`. + /// `ProposalData` fields include defaulting logic described in `get_fee_recipient_defaulting`, + /// `get_gas_limit_defaulting`, and `get_builder_proposals_defaulting`. + pub fn proposal_data(&self, pubkey: &PublicKeyBytes) -> Option<ProposalData> { + self.validators + .read() + .validator(pubkey) + .map(|validator| ProposalData { + validator_index: validator.get_index(), + fee_recipient: self + .get_fee_recipient_defaulting(validator.get_suggested_fee_recipient()), + gas_limit: self.get_gas_limit_defaulting(validator.get_gas_limit()), + builder_proposals: self + .get_builder_proposals_defaulting(validator.get_builder_proposals()), + }) + } + /// Attempts to resolve the pubkey to a validator index. /// /// It may return `None` if the `pubkey` is: @@ -356,12 +392,68 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { self.validators.read().graffiti(validator_pubkey) } - pub fn suggested_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option<Address> { + /// Returns the fee recipient for the given public key. The priority order for fetching + /// the fee recipient is: + /// 1. 
validator_definitions.yml
+    /// 2. process level fee recipient
+    pub fn get_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option<Address> {
+        // If there is a `suggested_fee_recipient` in the validator definitions yaml
+        // file, use that value.
+        self.get_fee_recipient_defaulting(self.suggested_fee_recipient(validator_pubkey))
+    }
+
+    pub fn get_fee_recipient_defaulting(&self, fee_recipient: Option<Address>) -> Option<Address> {
+        // If there's nothing in the file, try the process-level default value.
+        fee_recipient.or(self.fee_recipient_process)
+    }
+
+    /// Returns the suggested_fee_recipient from `validator_definitions.yml` if any.
+    /// This has been pulled into a private function so the read lock is dropped easily
+    fn suggested_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option<Address> {
         self.validators
             .read()
             .suggested_fee_recipient(validator_pubkey)
     }
 
+    /// Returns the gas limit for the given public key. The priority order for fetching
+    /// the gas limit is:
+    ///
+    /// 1. validator_definitions.yml
+    /// 2. process level gas limit
+    /// 3. `DEFAULT_GAS_LIMIT`
+    pub fn get_gas_limit(&self, validator_pubkey: &PublicKeyBytes) -> u64 {
+        self.get_gas_limit_defaulting(self.validators.read().gas_limit(validator_pubkey))
+    }
+
+    fn get_gas_limit_defaulting(&self, gas_limit: Option<u64>) -> u64 {
+        // If there is a `gas_limit` in the validator definitions yaml
+        // file, use that value.
+        gas_limit
+            // If there's nothing in the file, try the process-level default value.
+            .or(self.gas_limit)
+            // If there's no process-level default, use the `DEFAULT_GAS_LIMIT`.
+            .unwrap_or(DEFAULT_GAS_LIMIT)
+    }
+
+    /// Returns a `bool` for the given public key that denotes whether this validator should use the
+    /// builder API. The priority order for fetching this value is:
+    ///
+    /// 1. validator_definitions.yml
+    /// 2.
process level flag
+    pub fn get_builder_proposals(&self, validator_pubkey: &PublicKeyBytes) -> bool {
+        // If there is a `builder_proposals` flag in the validator definitions yaml
+        // file, use that value.
+        self.get_builder_proposals_defaulting(
+            self.validators.read().builder_proposals(validator_pubkey),
+        )
+    }
+
+    fn get_builder_proposals_defaulting(&self, builder_proposals: Option<bool>) -> bool {
+        builder_proposals
+            // If there's nothing in the file, try the process-level default value.
+            .unwrap_or(self.builder_proposals)
+    }
+
     pub async fn sign_block<Payload: ExecPayload<E>>(
         &self,
         validator_pubkey: PublicKeyBytes,