diff --git a/.github/custom/clippy.toml b/.github/custom/clippy.toml new file mode 100644 index 0000000000..f50e35bcdf --- /dev/null +++ b/.github/custom/clippy.toml @@ -0,0 +1,22 @@ +disallowed-from-async-methods = [ + "tokio::runtime::Handle::block_on", + "tokio::runtime::Runtime::block_on", + "tokio::task::LocalSet::block_on", + "tokio::sync::Mutex::blocking_lock", + "tokio::sync::RwLock::blocking_read", + "tokio::sync::mpsc::Receiver::blocking_recv", + "tokio::sync::mpsc::UnboundedReceiver::blocking_recv", + "tokio::sync::oneshot::Receiver::blocking_recv", + "tokio::sync::mpsc::Sender::blocking_send", + "tokio::sync::RwLock::blocking_write", +] +async-wrapper-methods = [ + "tokio::runtime::Handle::spawn_blocking", + "task_executor::TaskExecutor::spawn_blocking", + "task_executor::TaskExecutor::spawn_blocking_handle", + "warp_utils::task::blocking_task", + "warp_utils::task::blocking_json_task", + "validator_client::http_api::blocking_signed_json_task", + "execution_layer::test_utils::MockServer::new", + "execution_layer::test_utils::MockServer::new_with_config", +] diff --git a/.github/workflows/docker-antithesis.yml b/.github/workflows/docker-antithesis.yml index b7b35d1207..40de0bd0a5 100644 --- a/.github/workflows/docker-antithesis.yml +++ b/.github/workflows/docker-antithesis.yml @@ -15,7 +15,7 @@ env: jobs: build-docker: - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 - name: Update Rust diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml index c23ee8df36..30a891febf 100644 --- a/.github/workflows/linkcheck.yml +++ b/.github/workflows/linkcheck.yml @@ -21,7 +21,7 @@ jobs: run: docker network create book - name: Run mdbook server - run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:latest serve --hostname 0.0.0.0 + run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d 
peaceiris/mdbook:v0.4.20-rust serve --hostname 0.0.0.0 - name: Print logs run: docker logs book diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 13c1af7ab6..35032a0932 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -18,6 +18,9 @@ jobs: steps: - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install ganache run: npm install ganache@latest --global @@ -37,13 +40,29 @@ jobs: run: make && make install-lcli - name: Start local testnet - run: ./start_local_testnet.sh + run: ./start_local_testnet.sh && sleep 60 working-directory: scripts/local_testnet - name: Print logs - run: ./print_logs.sh + run: ./dump_logs.sh working-directory: scripts/local_testnet - name: Stop local testnet run: ./stop_local_testnet.sh working-directory: scripts/local_testnet + + - name: Clean-up testnet + run: ./clean.sh + working-directory: scripts/local_testnet + + - name: Start local testnet with blinded block production + run: ./start_local_testnet.sh -p && sleep 60 + working-directory: scripts/local_testnet + + - name: Print logs for blinded block testnet + run: ./dump_logs.sh + working-directory: scripts/local_testnet + + - name: Stop local testnet with blinded block production + run: ./stop_local_testnet.sh + working-directory: scripts/local_testnet diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index da0bcb3857..1a7d78f61f 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -102,9 +102,19 @@ jobs: run: rustup update stable - name: Run operation_pool tests for all known forks run: make test-op-pool + slasher-tests: + name: slasher-tests + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Run slasher tests for all supported backends + run: make test-slasher 
debug-tests-ubuntu: name: debug-tests-ubuntu - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 needs: cargo-fmt steps: - uses: actions/checkout@v1 @@ -158,6 +168,18 @@ jobs: run: sudo npm install -g ganache - name: Run the beacon chain sim that starts from an eth1 contract run: cargo run --release --bin simulator eth1-sim + merge-transition-ubuntu: + name: merge-transition-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install ganache + run: sudo npm install -g ganache + - name: Run the beacon chain sim and go through the merge transition + run: cargo run --release --bin simulator eth1-sim --post-merge no-eth1-simulator-ubuntu: name: no-eth1-simulator-ubuntu runs-on: ubuntu-latest @@ -252,6 +274,23 @@ jobs: run: make lint - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock + disallowed-from-async-lint: + name: disallowed-from-async-lint + runs-on: ubuntu-latest + needs: cargo-fmt + continue-on-error: true + steps: + - uses: actions/checkout@v1 + - name: Install SigP Clippy fork + run: | + cd .. 
+ git clone https://github.com/michaelsproul/rust-clippy.git + cd rust-clippy + git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a + cargo build --release --bin cargo-clippy --bin clippy-driver + cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin + - name: Run Clippy with the disallowed-from-async lint + run: make nightly-lint check-msrv: name: check-msrv runs-on: ubuntu-latest @@ -298,8 +337,10 @@ jobs: - uses: actions/checkout@v1 - name: Install Rust (${{ env.PINNED_NIGHTLY }}) run: rustup toolchain install $PINNED_NIGHTLY + # NOTE: cargo-udeps version is pinned until this issue is resolved: + # https://github.com/est31/cargo-udeps/issues/135 - name: Install cargo-udeps - run: cargo install cargo-udeps --locked + run: cargo install cargo-udeps --locked --force --version 0.1.30 - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config diff --git a/.gitignore b/.gitignore index 9376efc768..ae9f83c46d 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,7 @@ perf.data* *.tar.gz /bin genesis.ssz +/clippy.toml + +# IntelliJ +/*.iml diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 72f5e73920..489d12eb88 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,5 @@ # Contributors Guide +[![GitPOAP badge](https://public-api.gitpoap.io/v1/repo/sigp/lighthouse/badge)](https://www.gitpoap.io/gh/sigp/lighthouse) Lighthouse is an open-source Ethereum 2.0 client. We're community driven and welcome all contribution. 
We aim to provide a constructive, respectful and fun diff --git a/Cargo.lock b/Cargo.lock index c470e6c640..0590fe5080 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -187,6 +187,27 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +[[package]] +name = "async-stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.53" @@ -198,6 +219,17 @@ dependencies = [ "syn", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version 0.4.0", +] + [[package]] name = "asynchronous-codec" version = "0.6.0" @@ -211,15 +243,6 @@ dependencies = [ "pin-project-lite 0.2.9", ] -[[package]] -name = "atomic" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" -dependencies = [ - "autocfg 1.1.0", -] - [[package]] name = "attohttpc" version = "0.10.1" @@ -242,6 +265,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7862e21c893d65a1650125d157eaeec691439379a1cee17ee49031b79236ada4" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] 
name = "autocfg" version = "0.1.8" @@ -257,6 +292,53 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "axum" +version = "0.5.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa 1.0.2", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite 0.2.9", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "tower-layer", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.65" @@ -290,6 +372,24 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" +[[package]] +name = "beacon-api-client" +version = "0.1.0" +source = "git+https://github.com/ralexstokes/beacon-api-client?rev=de34eeb#de34eeb92e4fdee5709d142910abf42cf857609b" +dependencies = [ + "ethereum-consensus", + "http", + "itertools", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", + "url", +] + [[package]] name = "beacon_chain" version = "0.2.0" @@ -342,11 +442,12 @@ dependencies = [ "tokio", "tree_hash", "types", + "unused_port", ] [[package]] name = "beacon_node" -version = "2.2.1" +version = "3.1.0" dependencies = [ "beacon_chain", "clap", @@ -372,6 +473,7 @@ 
dependencies = [ "slasher", "slog", "store", + "strum", "task_executor", "types", "unused_port", @@ -502,7 +604,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "2.2.1" +version = "3.1.0" dependencies = [ "beacon_node", "clap", @@ -554,6 +656,17 @@ dependencies = [ "safemem", ] +[[package]] +name = "builder_client" +version = "0.1.0" +dependencies = [ + "eth2", + "reqwest", + "sensitive_url", + "serde", + "serde_json", +] + [[package]] name = "bumpalo" version = "3.9.1" @@ -764,6 +877,7 @@ dependencies = [ "sensitive_url", "serde", "serde_derive", + "serde_yaml", "slasher", "slasher_service", "slog", @@ -773,7 +887,6 @@ dependencies = [ "time 0.3.9", "timer", "tokio", - "toml", "types", ] @@ -1162,6 +1275,16 @@ version = "0.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" +[[package]] +name = "delay_map" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6716ce9729be9628979ae1ff63e8bc8b7ad53b5472a2633bf079607a55328d36" +dependencies = [ + "futures", + "tokio-util 0.6.10", +] + [[package]] name = "deposit_contract" version = "0.2.0" @@ -1561,6 +1684,7 @@ dependencies = [ "eth2", "eth2_ssz", "eth2_ssz_derive", + "execution_layer", "fallback", "futures", "hex", @@ -1572,12 +1696,12 @@ dependencies = [ "sensitive_url", "serde", "serde_json", + "serde_yaml", "slog", "sloggers", "state_processing", "task_executor", "tokio", - "toml", "tree_hash", "types", "web3", @@ -1849,6 +1973,27 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "ethereum-consensus" +version = "0.1.1" +source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e1188b1#e1188b14f320f225f2e53aa10336614565f04129" +dependencies = [ + "async-stream", + "blst", + "enr", + "hex", + "integer-sqrt", + "multiaddr 0.14.0", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.9.9", + "ssz-rs", + "thiserror", + "tokio", + "tokio-stream", +] 
+ [[package]] name = "ethereum-types" version = "0.12.1" @@ -1898,14 +2043,54 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "ethers-providers" +version = "0.6.0" +source = "git+https://github.com/gakonst/ethers-rs?rev=02ad93a1cfb7b62eb051c77c61dc4c0218428e4a#02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" +dependencies = [ + "async-trait", + "auto_impl", + "base64", + "ethers-core", + "futures-channel", + "futures-core", + "futures-timer", + "futures-util", + "hex", + "http", + "once_cell", + "parking_lot 0.11.2", + "pin-project 1.0.10", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-tungstenite 0.17.2", + "tracing", + "tracing-futures", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-timer", + "web-sys", + "ws_stream_wasm", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" dependencies = [ + "deposit_contract", "environment", + "ethers-core", + "ethers-providers", "execution_layer", "exit-future", + "fork_choice", "futures", + "hex", + "reqwest", "sensitive_url", "serde_json", "task_executor", @@ -1920,19 +2105,24 @@ name = "execution_layer" version = "0.1.0" dependencies = [ "async-trait", + "builder_client", "bytes", "environment", - "eth1", + "eth2", "eth2_serde_utils", + "eth2_ssz", "eth2_ssz_types", + "ethereum-consensus", "ethers-core", "exit-future", + "fork_choice", "futures", "hex", "jsonwebtoken", "lazy_static", "lighthouse_metrics", "lru", + "mev-build-rs", "parking_lot 0.12.0", "rand 0.8.5", "reqwest", @@ -1941,9 +2131,13 @@ dependencies = [ "serde_json", "slog", "slot_clock", + "ssz-rs", + "state_processing", + "strum", "task_executor", "tempfile", "tokio", + "tokio-stream", "tree_hash", "tree_hash_derive", "types", @@ -2093,7 +2287,10 @@ dependencies = [ "eth2_ssz", "eth2_ssz_derive", "proto_array", + "slog", + "state_processing", "store", + "tokio", "types", ] @@ -2397,22 +2594,22 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash", +] + [[package]] name = "hashlink" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" dependencies = [ - "hashbrown", -] - -[[package]] -name = "hashset_delay" -version = "0.2.0" -dependencies = [ - "futures", - "tokio", - "tokio-util 0.6.10", + "hashbrown 0.11.2", ] [[package]] @@ -2550,6 +2747,12 @@ dependencies = [ "pin-project-lite 0.2.9", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "http_api" version = "0.1.0" @@ -2568,11 +2771,14 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "logging", + "lru", "network", "parking_lot 0.12.0", + "proto_array", "safe_arith", "sensitive_url", "serde", + "serde_json", "slog", "slot_clock", "state_processing", @@ -2582,6 +2788,7 @@ dependencies = [ "tokio-stream", "tree_hash", "types", + "unused_port", "warp", "warp_utils", ] @@ -2649,6 +2856,19 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +dependencies = [ + "http", + "hyper", + "rustls 0.20.6", + "tokio", + "tokio-rustls 0.23.4", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -2777,7 +2997,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ "autocfg 1.1.0", - "hashbrown", + "hashbrown 0.11.2", ] [[package]] @@ -2787,6 +3007,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] @@ -2962,9 +3185,10 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "2.2.1" +version = "3.1.0" dependencies = [ "account_utils", + "beacon_chain", "bls", "clap", "clap_utils", @@ -2986,6 +3210,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "snap", "state_processing", "store", "tree_hash", @@ -3061,9 +3286,8 @@ checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" [[package]] name = "libmdbx" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897bbc93f0bc0152d809b89aa9b412ccb0c763881eac057ce891a5d497537f75" +version = "0.1.4" +source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" dependencies = [ "bitflags", "byteorder", @@ -3077,18 +3301,17 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.43.0" +version = "0.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e8570e25fa03d4385405dbeaf540ba00e3ee50942f03d84e1a8928a029f35f9" +checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029" dependencies = [ - "atomic", "bytes", "futures", "futures-timer", "getrandom 0.2.6", "instant", "lazy_static", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3130,11 +3353,11 @@ dependencies = [ "multistream-select 0.10.4", "parking_lot 0.11.2", "pin-project 1.0.10", - "prost", - "prost-build", + "prost 0.9.0", + "prost-build 0.9.0", "rand 0.8.5", "ring", - "rw-stream-sink", + "rw-stream-sink 0.2.1", "sha2 0.9.9", "smallvec", "thiserror", @@ -3145,9 +3368,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.32.1" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "db5b02602099fb75cb2d16f9ea860a320d6eb82ce41e95ab680912c454805cd5" +checksum = "42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979" dependencies = [ "asn1_der", "bs58", @@ -3165,11 +3388,11 @@ dependencies = [ "multistream-select 0.11.0", "parking_lot 0.12.0", "pin-project 1.0.10", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "rand 0.8.5", "ring", - "rw-stream-sink", + "rw-stream-sink 0.3.0", "sha2 0.10.2", "smallvec", "thiserror", @@ -3180,22 +3403,23 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.32.1" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "066e33e854e10b5c93fc650458bf2179c7e0d143db260b0963e44a94859817f1" +checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268" dependencies = [ "futures", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "log", + "parking_lot 0.12.0", "smallvec", "trust-dns-resolver", ] [[package]] name = "libp2p-gossipsub" -version = "0.36.0" +version = "0.38.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f62943fba0b0dae02b87868620c52a581c54ec9fb04b5e195cf20313fc510c3" +checksum = "43e064ba4d7832e01c738626c6b274ae100baba05f5ffcc7b265c2a3ed398108" dependencies = [ "asynchronous-codec", "base64", @@ -3205,12 +3429,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "libp2p-swarm", "log", "prometheus-client", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3221,28 +3445,32 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f219b4d4660fe3a04bf5fe6b5970902b7c1918e25b2536be8c70efc480f88f8" +checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984" dependencies = [ + "asynchronous-codec", "futures", "futures-timer", 
- "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "libp2p-swarm", "log", "lru", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", + "prost-codec", "smallvec", + "thiserror", + "void", ] [[package]] name = "libp2p-metrics" -version = "0.4.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29e4e5e4c5aa567fe1ee3133afe088dc2d2fd104e20c5c2c5c2649f75129677" +checksum = "564a7e5284d7d9b3140fdfc3cb6567bc32555e86a21de5604c2ec85da05cf384" dependencies = [ - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3251,14 +3479,14 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "442eb0c9fff0bf22a34f015724b4143ce01877e079ed0963c722d94c07c72160" +checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "log", "nohash-hasher", "parking_lot 0.12.0", @@ -3269,18 +3497,18 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd7e0c94051cda67123be68cf6b65211ba3dde7277be9068412de3e7ffd63ef" +checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad" dependencies = [ "bytes", "curve25519-dalek 3.2.0", "futures", "lazy_static", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "log", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3291,33 +3519,33 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "962c0fb0e7212fb96a69b87f2d09bcefd317935239bdc79cda900e7a8897a3fe" +checksum = 
"db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "log", - "prost", - "prost-build", + "prost 0.10.4", + "prost-build 0.10.4", "unsigned-varint 0.7.1", "void", ] [[package]] name = "libp2p-swarm" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53ab2d4eb8ef2966b10fdf859245cdd231026df76d3c6ed2cf9e418a8f688ec9" +checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "log", "pin-project 1.0.10", "rand 0.7.3", @@ -3338,16 +3566,16 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193447aa729c85aac2376828df76d171c1a589c9e6b58fcc7f9d9a020734122c" +checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892" dependencies = [ "futures", "futures-timer", "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "log", "socket2", "tokio", @@ -3355,17 +3583,18 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c932834c3754501c368d1bf3d0fb458487a642b90fc25df082a3a2f3d3b32e37" +checksum = "39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "log", + "parking_lot 0.12.0", "quicksink", - "rw-stream-sink", + "rw-stream-sink 0.3.0", "soketto", "url", "webpki-roots", @@ -3373,12 +3602,12 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"be902ebd89193cd020e89e89107726a38cfc0d16d18f613f4a37d046e92c7517" +checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" dependencies = [ "futures", - "libp2p-core 0.32.1", + "libp2p-core 0.33.0", "parking_lot 0.12.0", "thiserror", "yamux", @@ -3456,7 +3685,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "2.2.1" +version = "3.1.0" dependencies = [ "account_manager", "account_utils", @@ -3469,6 +3698,7 @@ dependencies = [ "directory", "env_logger 0.9.0", "environment", + "eth1", "eth2_hashing 0.3.0", "eth2_network_config", "futures", @@ -3481,6 +3711,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "slasher", "slashing_protection", "slog", "sloggers", @@ -3505,6 +3736,7 @@ dependencies = [ name = "lighthouse_network" version = "0.2.0" dependencies = [ + "delay_map", "directory", "dirs", "discv5", @@ -3515,7 +3747,6 @@ dependencies = [ "exit-future", "fnv", "futures", - "hashset_delay", "hex", "lazy_static", "libp2p", @@ -3524,6 +3755,8 @@ dependencies = [ "lru", "parking_lot 0.12.0", "prometheus-client", + "quickcheck 0.9.2", + "quickcheck_macros", "rand 0.8.5", "regex", "serde", @@ -3563,6 +3796,27 @@ version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +[[package]] +name = "lmdb-rkv" +version = "0.14.0" +source = "git+https://github.com/sigp/lmdb-rs?rev=f33845c6469b94265319aac0ed5085597862c27e#f33845c6469b94265319aac0ed5085597862c27e" +dependencies = [ + "bitflags", + "byteorder", + "libc", + "lmdb-rkv-sys", +] + +[[package]] +name = "lmdb-rkv-sys" +version = "0.11.2" +source = "git+https://github.com/sigp/lmdb-rs?rev=f33845c6469b94265319aac0ed5085597862c27e#f33845c6469b94265319aac0ed5085597862c27e" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + [[package]] name = "lock_api" version = "0.4.7" @@ -3603,11 +3857,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.5" +version = "0.7.8" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32613e41de4c47ab04970c348ca7ae7382cf116625755af070b008a15516a889" +checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" dependencies = [ - "hashbrown", + "hashbrown 0.12.3", ] [[package]] @@ -3675,10 +3929,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] -name = "mdbx-sys" -version = "0.11.7-6" +name = "matchit" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e191c9af757a54621bb4fddb6264ea5cc38cd340afa79cd531132216979867" +checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" + +[[package]] +name = "mdbx-sys" +version = "0.11.6-4" +source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" dependencies = [ "bindgen", "cc", @@ -3713,6 +3972,22 @@ dependencies = [ "safe_arith", ] +[[package]] +name = "mev-build-rs" +version = "0.2.1" +source = "git+https://github.com/ralexstokes/mev-rs?rev=a088806575805c00d63fa59c002abc5eb1dc7709#a088806575805c00d63fa59c002abc5eb1dc7709" +dependencies = [ + "async-trait", + "axum", + "beacon-api-client", + "ethereum-consensus", + "serde", + "serde_json", + "ssz-rs", + "thiserror", + "tracing", +] + [[package]] name = "milagro_bls" version = "1.4.2" @@ -3737,6 +4012,7 @@ dependencies = [ "parking_lot 0.11.2", "rayon", "serde", + "smallvec", "tree_hash", "triomphe", "typenum", @@ -3776,9 +4052,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", @@ -3971,6 +4247,7 @@ name = "network" version = "0.2.0" dependencies = [ 
"beacon_chain", + "delay_map", "derivative", "environment", "error-chain", @@ -3980,7 +4257,6 @@ dependencies = [ "fnv", "futures", "genesis", - "hashset_delay", "hex", "if-addrs 0.6.7", "igd", @@ -4040,6 +4316,7 @@ dependencies = [ "beacon_node", "environment", "eth2", + "execution_layer", "sensitive_url", "tempfile", "types", @@ -4235,18 +4512,21 @@ name = "operation_pool" version = "0.2.0" dependencies = [ "beacon_chain", + "bitvec 1.0.0", "derivative", "eth2_ssz", "eth2_ssz_derive", "itertools", "lazy_static", "lighthouse_metrics", + "maplit", "parking_lot 0.12.0", "rayon", "serde", "serde_derive", "state_processing", "store", + "tokio", "types", ] @@ -4423,6 +4703,16 @@ dependencies = [ "indexmap", ] +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version 0.4.0", +] + [[package]] name = "pin-project" version = "0.4.29" @@ -4675,9 +4965,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a896938cc6018c64f279888b8c7559d3725210d5db9a3a1ee6bc7188d51d34" +checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" dependencies = [ "dtoa", "itoa 1.0.2", @@ -4703,7 +4993,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.9.0", +] + +[[package]] +name = "prost" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +dependencies = [ + "bytes", + "prost-derive 0.10.1", ] [[package]] @@ -4719,13 +5019,48 @@ dependencies = [ "log", "multimap", "petgraph", - "prost", - 
"prost-types", + "prost 0.9.0", + "prost-types 0.9.0", "regex", "tempfile", "which", ] +[[package]] +name = "prost-build" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" +dependencies = [ + "bytes", + "cfg-if", + "cmake", + "heck 0.4.0", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prost 0.10.4", + "prost-types 0.10.1", + "regex", + "tempfile", + "which", +] + +[[package]] +name = "prost-codec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" +dependencies = [ + "asynchronous-codec", + "bytes", + "prost 0.10.4", + "thiserror", + "unsigned-varint 0.7.1", +] + [[package]] name = "prost-derive" version = "0.9.0" @@ -4739,6 +5074,19 @@ dependencies = [ "syn", ] +[[package]] +name = "prost-derive" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost-types" version = "0.9.0" @@ -4746,7 +5094,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ "bytes", - "prost", + "prost 0.9.0", +] + +[[package]] +name = "prost-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" +dependencies = [ + "bytes", + "prost 0.10.4", ] [[package]] @@ -5051,6 +5409,7 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-rustls", "hyper-tls", "ipnet", "js-sys", @@ -5060,16 +5419,20 @@ dependencies = [ "native-tls", "percent-encoding", "pin-project-lite 0.2.9", + "rustls 0.20.6", + 
"rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-rustls 0.23.4", "tokio-util 0.6.10", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.10.1", ] @@ -5240,6 +5603,15 @@ dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "rustls-pemfile" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ee86d63972a7c661d1536fefe8c3c8407321c3df668891286de28abcd087360" +dependencies = [ + "base64", +] + [[package]] name = "rustversion" version = "1.0.6" @@ -5257,6 +5629,17 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "rw-stream-sink" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" +dependencies = [ + "futures", + "pin-project 1.0.10", + "static_assertions", +] + [[package]] name = "ryu" version = "1.0.10" @@ -5447,6 +5830,12 @@ dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "930c0acf610d3fdb5e2ab6213019aaa04e227ebe9547b0649ba599b16d788bd7" + [[package]] name = "sensitive_url" version = "0.1.0" @@ -5529,6 +5918,28 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" +dependencies = [ + "serde", + "serde_with_macros", +] + +[[package]] +name = "serde_with_macros" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_yaml" version = "0.8.24" @@ -5665,6 +6076,7 @@ dependencies = [ "env_logger 0.9.0", "eth1", "eth1_test_rig", + 
"execution_layer", "futures", "node_test_rig", "parking_lot 0.12.0", @@ -5693,6 +6105,8 @@ dependencies = [ "lazy_static", "libmdbx", "lighthouse_metrics", + "lmdb-rkv", + "lmdb-rkv-sys", "logging", "lru", "maplit", @@ -5704,6 +6118,7 @@ dependencies = [ "serde_derive", "slog", "sloggers", + "strum", "tempfile", "tree_hash", "tree_hash_derive", @@ -5935,6 +6350,31 @@ dependencies = [ "der 0.5.1", ] +[[package]] +name = "ssz-rs" +version = "0.8.0" +source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +dependencies = [ + "bitvec 1.0.0", + "hex", + "lazy_static", + "num-bigint", + "serde", + "sha2 0.9.9", + "ssz-rs-derive", + "thiserror", +] + +[[package]] +name = "ssz-rs-derive" +version = "0.8.0" +source = "git+https://github.com/ralexstokes/ssz-rs?rev=cb08f1#cb08f18ca919cc1b685b861d0fa9e2daabe89737" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -5948,9 +6388,11 @@ dependencies = [ "arbitrary", "beacon_chain", "bls", + "derivative", "env_logger 0.9.0", "eth2_hashing 0.3.0", "eth2_ssz", + "eth2_ssz_derive", "eth2_ssz_types", "int_to_bytes", "integer-sqrt", @@ -5962,6 +6404,7 @@ dependencies = [ "rustc-hash", "safe_arith", "smallvec", + "tokio", "tree_hash", "types", "vec_map", @@ -5975,6 +6418,7 @@ dependencies = [ "eth2_ssz", "lazy_static", "state_processing", + "tokio", "types", ] @@ -6027,9 +6471,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96acfc1b70604b8b2f1ffa4c57e59176c7dbb05d556c71ecd2f5498a1dee7f8" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ "strum_macros", ] @@ -6087,6 +6531,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "synstructure" version = "0.12.6" @@ -6335,10 +6785,11 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.18.2" +version = "1.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" +checksum = "0020c875007ad96677dcc890298f4b942882c5d4eb7cc8f439fc3bf813dc9c95" dependencies = [ + "autocfg 1.1.0", "bytes", "libc", "memchr", @@ -6396,15 +6847,26 @@ dependencies = [ ] [[package]] -name = "tokio-stream" -version = "0.1.8" +name = "tokio-rustls" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls 0.20.6", + "tokio", + "webpki 0.22.0", +] + +[[package]] +name = "tokio-stream" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.6.10", + "tokio-util 0.7.2", ] [[package]] @@ -6417,7 +6879,23 @@ dependencies = [ "log", "pin-project 1.0.10", "tokio", - "tungstenite", + "tungstenite 0.14.0", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" +dependencies = [ + "futures-util", + "log", + "rustls 0.20.6", + "tokio", + "tokio-rustls 0.23.4", + "tungstenite 0.17.3", + "webpki 0.22.0", + "webpki-roots", ] [[package]] @@ -6459,6 +6937,47 @@ dependencies = [ "serde", ] +[[package]] +name = "tower" 
+version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project 1.0.10", + "pin-project-lite 0.2.9", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite 0.2.9", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.1" @@ -6499,6 +7018,16 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project 1.0.10", + "tracing", +] + [[package]] name = "tracing-log" version = "0.1.3" @@ -6652,6 +7181,27 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tungstenite" +version = "0.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" +dependencies = [ + "base64", + "byteorder", + "bytes", + "http", + "httparse", + "log", + "rand 0.8.5", + "rustls 0.20.6", + "sha-1 0.10.0", + "thiserror", + "url", + "utf-8", + "webpki 0.22.0", +] + [[package]] name = "twoway" version = "0.1.8" @@ -6691,6 +7241,7 @@ dependencies = [ "itertools", "lazy_static", "log", + "maplit", "milhouse", "parking_lot 0.12.0", "rand 0.8.5", @@ 
-6703,6 +7254,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_with", "serde_yaml", "slog", "smallvec", @@ -6711,6 +7263,7 @@ dependencies = [ "swap_or_not_shuffle", "tempfile", "test_random_derive", + "tokio", "tree_hash", "tree_hash_derive", ] @@ -6884,6 +7437,7 @@ dependencies = [ "lighthouse_version", "lockfile", "logging", + "malloc_utils", "monitoring_api", "parking_lot 0.12.0", "rand 0.8.5", @@ -6998,9 +7552,9 @@ dependencies = [ "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "tokio-stream", - "tokio-tungstenite", + "tokio-tungstenite 0.15.0", "tokio-util 0.6.10", "tower-service", "tracing", @@ -7385,6 +7939,24 @@ dependencies = [ "winapi", ] +[[package]] +name = "ws_stream_wasm" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47ca1ab42f5afed7fc332b22b6e932ca5414b209465412c8cdf0ad23bc0de645" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "pharos", + "rustc_version 0.4.0", + "send_wrapper", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wyz" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index b873a15cd9..7ef316f630 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "beacon_node", "beacon_node/beacon_chain", + "beacon_node/builder_client", "beacon_node/client", "beacon_node/eth1", "beacon_node/lighthouse_network", @@ -27,7 +28,6 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", - "common/hashset_delay", "common/lighthouse_metrics", "common/lighthouse_version", "common/lockfile", diff --git a/Cross.toml b/Cross.toml index 2db3992464..d5f7a5d506 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,15 +1,5 @@ -[build.env] -passthrough = [ - "RUSTFLAGS", -] - -# These custom images are required to work around the lack of Clang in the default `cross` images. 
-# We need Clang to run `bindgen` for MDBX, and the `BINDGEN_EXTRA_CLANG_ARGS` flags must also be set -# while cross-compiling for ARM to prevent bindgen from attempting to include headers from the host. -# -# For more information see https://github.com/rust-embedded/cross/pull/608 [target.x86_64-unknown-linux-gnu] -image = "michaelsproul/cross-clang:x86_64-latest" +pre-build = ["apt-get install -y cmake clang-3.9"] [target.aarch64-unknown-linux-gnu] -image = "michaelsproul/cross-clang:aarch64-latest" +pre-build = ["apt-get install -y cmake clang-3.9"] diff --git a/Dockerfile b/Dockerfile index 76347e9bfe..86a69c6539 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,11 @@ -FROM rust:1.58.1-bullseye AS builder +FROM rust:1.62.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES ENV FEATURES $FEATURES RUN cd lighthouse && make -FROM ubuntu:latest +FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ libssl-dev \ ca-certificates \ diff --git a/Dockerfile.cross b/Dockerfile.cross index c8bd868878..e210c5bdfc 100644 --- a/Dockerfile.cross +++ b/Dockerfile.cross @@ -1,7 +1,7 @@ # This image is meant to enable cross-architecture builds. # It assumes the lighthouse binary has already been # compiled for `$TARGETPLATFORM` and moved to `./bin`. -FROM --platform=$TARGETPLATFORM ubuntu:latest +FROM --platform=$TARGETPLATFORM ubuntu:22.04 RUN apt-get update && apt-get install -y --no-install-recommends \ libssl-dev \ ca-certificates \ diff --git a/Makefile b/Makefile index 01fd45a4dd..6b5c6b3e5d 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,10 @@ AARCH64_TAG = "aarch64-unknown-linux-gnu" BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly +CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 + +# List of features to use when cross-compiling. Can be overridden via the environment. 
+CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. @@ -41,13 +45,13 @@ install-lcli: # optimized CPU functions that may not be available on some systems. This # results in a more portable binary with ~20% slower BLS verification. build-x86_64: - cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features modern,gnosis + cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features "modern,$(CROSS_FEATURES)" build-x86_64-portable: - cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features portable,gnosis + cross build --release --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" build-aarch64: - cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features gnosis + cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features "$(CROSS_FEATURES)" build-aarch64-portable: - cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features portable,gnosis + cross build --release --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary @@ -76,7 +80,7 @@ build-release-tarballs: # Runs the full workspace tests in **release**, without downloading any additional # test vectors. test-release: - cargo test --workspace --release --exclude ef_tests --exclude beacon_chain + cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. @@ -117,6 +121,11 @@ test-op-pool-%: --features 'beacon_chain/fork_from_env'\ -p operation_pool +# Run the tests in the `slasher` crate for all supported database backends. 
+test-slasher: + cargo test --release -p slasher --features mdbx + cargo test --release -p slasher --no-default-features --features lmdb + # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: make -C $(STATE_TRANSITION_VECTORS) test @@ -141,10 +150,18 @@ lint: cargo clippy --workspace --tests -- \ -D clippy::fn_to_numeric_cast_any \ -D warnings \ + -A clippy::derive_partial_eq_without_eq \ -A clippy::from-over-into \ -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push +nightly-lint: + cp .github/custom/clippy.toml . + cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \ + -A clippy::all \ + -D clippy::disallowed_from_async + rm clippy.toml + # Runs the makefile in the `ef_tests` repo. # # May download and extract an archive of test vectors from the ethereum @@ -161,7 +178,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 + cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 --ignore RUSTSEC-2022-0040 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. 
vendor: diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 4c7140df39..c581866a25 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -280,6 +280,8 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin password_opt, graffiti, suggested_fee_recipient, + None, + None, ) .map_err(|e| format!("Unable to create new validator definition: {:?}", e))?; diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 986ff7a615..7245258bb2 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "2.2.1" +version = "3.1.0" authors = ["Paul Hauner ", "Age Manning Clone for IndexedUnaggregatedAttestation<'a, T> { /// A helper trait implemented on wrapper types that can be progressed to a state where they can be /// verified for application to fork choice. -pub trait VerifiedAttestation { +pub trait VerifiedAttestation: Sized { fn attestation(&self) -> &Attestation; fn indexed_attestation(&self) -> &IndexedAttestation; + + // Inefficient default implementation. This is overridden for gossip verified attestations. + fn into_attestation_and_indices(self) -> (Attestation, Vec) { + let attestation = self.attestation().clone(); + let attesting_indices = self.indexed_attestation().attesting_indices.clone().into(); + (attestation, attesting_indices) + } } impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregatedAttestation<'a, T> { @@ -976,8 +983,8 @@ fn verify_head_block_is_known( max_skip_slots: Option, ) -> Result { let block_opt = chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&attestation.data.beacon_block_root) .or_else(|| { chain @@ -1245,7 +1252,10 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. 
- if !chain.fork_choice.read().contains_block(&target.root) + if !chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&target.root) && !chain.early_attester_cache.contains_block(target.root) { return Err(Error::UnknownTargetRoot(target.root)); diff --git a/beacon_node/beacon_chain/src/attestation_verification/batch.rs b/beacon_node/beacon_chain/src/attestation_verification/batch.rs index 30f1ae7e5b..6f76cce024 100644 --- a/beacon_node/beacon_chain/src/attestation_verification/batch.rs +++ b/beacon_node/beacon_chain/src/attestation_verification/batch.rs @@ -65,7 +65,7 @@ where .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; - let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?; + let fork = chain.canonical_head.cached_head().head_fork(); let mut signature_sets = Vec::with_capacity(num_indexed * 3); @@ -169,13 +169,13 @@ where &metrics::ATTESTATION_PROCESSING_BATCH_UNAGG_SIGNATURE_SETUP_TIMES, ); + let fork = chain.canonical_head.cached_head().head_fork(); + let pubkey_cache = chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)?; - let fork = chain.with_head(|head| Ok::<_, BeaconChainError>(head.beacon_state.fork()))?; - let mut signature_sets = Vec::with_capacity(num_partially_verified); // Iterate, flattening to get only the `Ok` values. 
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 812622b3aa..e1ef5f7c58 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -9,15 +9,15 @@ use crate::beacon_proposer_cache::BeaconProposerCache; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, - signature_verify_chain_segment, BlockError, FullyVerifiedBlock, GossipVerifiedBlock, - IntoFullyVerifiedBlock, + signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock, + IntoExecutionPendingBlock, PayloadVerificationOutcome, POS_PANDA_BANNER, }; use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::ServerSentEventHandler; -use crate::execution_payload::get_execution_payload; +use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::head_tracker::HeadTracker; use crate::historical_blocks::HistoricalBlockError; @@ -51,29 +51,35 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::BeaconForkChoiceStore; use crate::BeaconSnapshot; use crate::{metrics, BeaconChainError}; -use eth2::types::{ - EventKind, SseBlock, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead, SyncDuty, +use eth2::types::{EventKind, SseBlock, SyncDuty}; +use execution_layer::{ + BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, +}; +use fork_choice::{ + AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, + InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, }; -use 
execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; -use fork_choice::{AttestationFromBlock, ForkChoice, InvalidationOperation}; use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; -use operation_pool::{OperationPool, PersistedOperationPool}; +use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; -use proto_array::ExecutionStatus; +use proto_array::CountUnrealizedFull; use safe_arith::SafeArith; use slasher::Slasher; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_processing::{ - common::get_indexed_attestation, + common::{get_attesting_indices_from_state, get_indexed_attestation}, per_block_processing, - per_block_processing::{errors::AttestationValidationError, is_merge_transition_complete}, + per_block_processing::{ + errors::AttestationValidationError, verify_attestation_for_block_inclusion, + VerifySignatures, + }, per_slot_processing, state_advance::{complete_state_advance, partial_state_advance}, - BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation, }; use std::cmp::Ordering; use std::collections::HashMap; @@ -86,15 +92,17 @@ use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterato use store::{ DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; -use task_executor::ShutdownReason; +use task_executor::{ShutdownReason, TaskExecutor}; use tree_hash::TreeHash; use types::*; +pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; +pub use fork_choice::CountUnrealized; + pub type ForkChoiceError = fork_choice::Error; -/// The time-out before failure during an operation to take a read/write RwLock on the canonical -/// head. 
-pub const HEAD_LOCK_TIMEOUT: Duration = Duration::from_secs(1); +/// Alias to appease clippy. +type HashBlockTuple = (Hash256, Arc>); /// The time-out before failure during an operation to take a read/write RwLock on the /// attestation cache. @@ -118,12 +126,26 @@ const EARLY_ATTESTER_CACHE_HISTORIC_SLOTS: u64 = 4; /// If the head block is older than this value, don't bother preparing beacon proposers. const PREPARE_PROPOSER_HISTORIC_EPOCHS: u64 = 4; +/// If the head is more than `MAX_PER_SLOT_FORK_CHOICE_DISTANCE` slots behind the wall-clock slot, DO NOT +/// run the per-slot tasks (primarily fork choice). +/// +/// This prevents unnecessary work during sync. +/// +/// The value is set to 256 since this would be just over one slot (12.8s) when syncing at +/// 20 slots/second. Having a single fork-choice run interrupt syncing would have very little +/// impact whilst having 8 epochs without a block is a comfortable grace period. +const MAX_PER_SLOT_FORK_CHOICE_DISTANCE: u64 = 256; + /// Reported to the user when the justified block has an invalid execution payload. pub const INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON: &str = "Justified block has an invalid execution payload."; -/// Interval before the attestation deadline during which to consider blocks "borderline" late. -const BORDERLINE_LATE_BLOCK_TOLERANCE: Duration = Duration::from_millis(50); +// FIXME(sproul): decide whether to keep this +// Interval before the attestation deadline during which to consider blocks "borderline" late. +// const BORDERLINE_LATE_BLOCK_TOLERANCE: Duration = Duration::from_millis(50); + +pub const INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON: &str = + "Finalized merge transition block is invalid."; /// Defines the behaviour when a block/block-root for a skipped slot is requested. 
pub enum WhenSlotSkipped { @@ -208,22 +230,6 @@ pub enum StateSkipConfig { WithoutStateRoots, } -#[derive(Debug, PartialEq)] -pub struct HeadInfo { - pub slot: Slot, - pub block_root: Hash256, - pub state_root: Hash256, - pub current_justified_checkpoint: types::Checkpoint, - pub finalized_checkpoint: types::Checkpoint, - pub fork: Fork, - pub genesis_time: u64, - pub genesis_validators_root: Hash256, - pub proposer_shuffling_decision_root: Hash256, - pub is_merge_transition_complete: bool, - pub execution_payload_block_hash: Option, - pub random: Hash256, -} - pub trait BeaconChainTypes: Send + Sync + 'static { type HotStore: store::ItemStore; type ColdStore: store::ItemStore; @@ -232,23 +238,22 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type EthSpec: types::EthSpec; } -/// Indicates the EL payload verification status of the head beacon block. -#[derive(Debug, PartialEq)] -pub enum HeadSafetyStatus { - /// The head block has either been verified by an EL or is does not require EL verification - /// (e.g., it is pre-merge or pre-terminal-block). - /// - /// If the block is post-terminal-block, `Some(execution_payload.block_hash)` is included with - /// the variant. - Safe(Option), - /// The head block execution payload has not yet been verified by an EL. - /// - /// The `execution_payload.block_hash` of the head block is returned. - Unsafe(ExecutionBlockHash), - /// The head block execution payload was deemed to be invalid by an EL. - /// - /// The `execution_payload.block_hash` of the head block is returned. - Invalid(ExecutionBlockHash), +/// Used internally to split block production into discrete functions. 
+struct PartialBeaconBlock { + state: BeaconState, + slot: Slot, + proposer_index: u64, + parent_root: Hash256, + randao_reveal: Signature, + eth1_data: Eth1Data, + graffiti: Graffiti, + proposer_slashings: Vec, + attester_slashings: Vec>, + attestations: Vec>, + deposits: Vec, + voluntary_exits: Vec, + sync_aggregate: Option>, + prepare_payload_handle: Option>, } pub type BeaconForkChoice = ForkChoice< @@ -276,6 +281,8 @@ pub struct BeaconChain { pub config: ChainConfig, /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB. pub store: BeaconStore, + /// Used for spawning async and blocking tasks. + pub task_executor: TaskExecutor, /// Database migrator for running background maintenance on the store. pub store_migrator: BackgroundMigrator, /// Reports the current slot, typically based upon the system clock. @@ -326,29 +333,29 @@ pub struct BeaconChain { /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, /// Interfaces with the execution client. - pub execution_layer: Option, - /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. - pub(crate) canonical_head: TimeoutRwLock>, + pub execution_layer: Option>, + /// Stores information about the canonical head and finalized/justified checkpoints of the + /// chain. Also contains the fork choice struct, for computing the canonical head. + pub canonical_head: CanonicalHead, /// The root of the genesis block. pub genesis_block_root: Hash256, /// The root of the genesis state. pub genesis_state_root: Hash256, /// The root of the list of genesis validators, used during syncing. pub genesis_validators_root: Hash256, - /// A state-machine that is updated with information from the network and chooses a canonical - /// head block. - pub fork_choice: RwLock>, /// Transmitter used to indicate that slot-start fork choice has completed running. 
pub fork_choice_signal_tx: Option, /// Receiver used by block production to wait on slot-start fork choice. pub fork_choice_signal_rx: Option, + /// The genesis time of this `BeaconChain` (seconds since UNIX epoch). + pub genesis_time: u64, /// A handler for events generated by the beacon chain. This is only initialized when the /// HTTP server is enabled. pub event_handler: Option>, /// Used to track the heads of the beacon chain. pub(crate) head_tracker: Arc, /// Caches the attester shuffling for a given epoch and shuffling key root. - pub(crate) shuffling_cache: TimeoutRwLock, + pub shuffling_cache: TimeoutRwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Mutex, /// Caches a map of `validator_index -> validator_pubkey`. @@ -420,25 +427,14 @@ impl BeaconChain { .as_kv_store_op(BEACON_CHAIN_DB_KEY) } - /// Return a database operation for writing fork choice to disk. - pub fn persist_fork_choice_in_batch(&self) -> Result { - let fork_choice = self.fork_choice.read(); - Self::persist_fork_choice_in_batch_standalone(&fork_choice) - } - - /// Return a database operation for writing fork choice to disk. - pub fn persist_fork_choice_in_batch_standalone( - fork_choice: &BeaconForkChoice, - ) -> Result { - let persisted_fork_choice = PersistedForkChoice { - fork_choice: fork_choice.to_persisted(), - fork_choice_store: fork_choice.fc_store().to_persisted(), - }; - persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY) - } - /// Load fork choice from disk, returning `None` if it isn't found. - pub fn load_fork_choice(store: BeaconStore) -> Result>, Error> { + pub fn load_fork_choice( + store: BeaconStore, + reset_payload_statuses: ResetPayloadStatuses, + count_unrealized_full: CountUnrealizedFull, + spec: &ChainSpec, + log: &Logger, + ) -> Result>, Error> { let persisted_fork_choice = match store.get_item::(&FORK_CHOICE_DB_KEY)? 
{ Some(fc) => fc, @@ -450,7 +446,11 @@ impl BeaconChain { Ok(Some(ForkChoice::from_persisted( persisted_fork_choice.fork_choice, + reset_payload_statuses, fc_store, + count_unrealized_full, + spec, + log, )?)) } @@ -528,11 +528,11 @@ impl BeaconChain { )); } - let local_head = self.head()?; + let local_head = self.head_snapshot(); let iter = self.store.forwards_block_roots_iterator( start_slot, - local_head.beacon_state, + local_head.beacon_state.clone(), local_head.beacon_block_root, &self.spec, )?; @@ -542,6 +542,7 @@ impl BeaconChain { /// Even more efficient variant of `forwards_iter_block_roots` that will avoid cloning the head /// state if it isn't required for the requested range of blocks. + /// The range [start_slot, end_slot] is inclusive (ie `start_slot <= end_slot`) pub fn forwards_iter_block_roots_until( &self, start_slot: Slot, @@ -597,77 +598,6 @@ impl BeaconChain { .map(|result| result.map_err(|e| e.into()))) } - /// Iterate through the current chain to find the slot intersecting with the given beacon state. - /// The maximum depth this will search is `SLOTS_PER_HISTORICAL_ROOT`, and if that depth is reached - /// and no intersection is found, the finalized slot will be returned. - pub fn find_reorg_slot( - &self, - new_state: &BeaconState, - new_block_root: Hash256, - ) -> Result { - self.with_head(|snapshot| { - let old_state = &snapshot.beacon_state; - let old_block_root = snapshot.beacon_block_root; - - // The earliest slot for which the two chains may have a common history. - let lowest_slot = std::cmp::min(new_state.slot(), old_state.slot()); - - // Create an iterator across `$state`, assuming that the block at `$state.slot` has the - // block root of `$block_root`. - // - // The iterator will be skipped until the next value returns `lowest_slot`. - // - // This is a macro instead of a function or closure due to the complex types invloved - // in all the iterator wrapping. - macro_rules! 
aligned_roots_iter { - ($state: ident, $block_root: ident) => { - std::iter::once(Ok(($state.slot(), $block_root))) - .chain($state.rev_iter_block_roots(&self.spec)) - .skip_while(|result| { - result - .as_ref() - .map_or(false, |(slot, _)| *slot > lowest_slot) - }) - }; - } - - // Create iterators across old/new roots where iterators both start at the same slot. - let mut new_roots = aligned_roots_iter!(new_state, new_block_root); - let mut old_roots = aligned_roots_iter!(old_state, old_block_root); - - // Whilst *both* of the iterators are still returning values, try and find a common - // ancestor between them. - while let (Some(old), Some(new)) = (old_roots.next(), new_roots.next()) { - let (old_slot, old_root) = old?; - let (new_slot, new_root) = new?; - - // Sanity check to detect programming errors. - if old_slot != new_slot { - return Err(Error::InvalidReorgSlotIter { new_slot, old_slot }); - } - - if old_root == new_root { - // A common ancestor has been found. - return Ok(old_slot); - } - } - - // If no common ancestor is found, declare that the re-org happened at the previous - // finalized slot. - // - // Sometimes this will result in the return slot being *lower* than the actual reorg - // slot. However, assuming we don't re-org through a finalized slot, it will never be - // *higher*. - // - // We provide this potentially-inaccurate-but-safe information to avoid onerous - // database reads during times of deep reorgs. - Ok(old_state - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch())) - }) - } - /// Iterates backwards across all `(state_root, slot)` pairs starting from /// an arbitrary `BeaconState` to the earliest reachable ancestor (may or may not be genesis). 
/// @@ -698,12 +628,12 @@ impl BeaconChain { &self, start_slot: Slot, ) -> Result> + '_, Error> { - let local_head = self.head()?; + let local_head = self.head_snapshot(); let iter = self.store.forwards_state_roots_iterator( start_slot, local_head.beacon_state_root(), - local_head.beacon_state, + local_head.beacon_state.clone(), &self.spec, )?; @@ -958,11 +888,11 @@ impl BeaconChain { pub async fn get_block_checking_early_attester_cache( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result>>, Error> { if let Some(block) = self.early_attester_cache.get_block(*block_root) { return Ok(Some(block)); } - self.get_block(block_root).await + Ok(self.get_block(block_root).await?.map(Arc::new)) } /// Returns the block at the given root, if any. @@ -1048,52 +978,6 @@ impl BeaconChain { Ok(self.store.get_state(state_root, slot)?) } - /// Returns a `Checkpoint` representing the head block and state. Contains the "best block"; - /// the head of the canonical `BeaconChain`. - /// - /// It is important to note that the `beacon_state` returned may not match the present slot. It - /// is the state as it was when the head block was received, which could be some slots prior to - /// now. - pub fn head(&self) -> Result, Error> { - self.with_head(|head| Ok(head.clone())) - } - - /// Apply a function to the canonical head without cloning it. - pub fn with_head( - &self, - f: impl FnOnce(&BeaconSnapshot) -> Result, - ) -> Result - where - E: From, - { - let head_lock = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)?; - f(&head_lock) - } - - /// Returns the beacon block root at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_block_root(&self) -> Result { - self.with_head(|s| Ok(s.beacon_block_root)) - } - - /// Returns the beacon block at the head of the canonical chain. - /// - /// See `Self::head` for more information. 
- pub fn head_beacon_block(&self) -> Result, Error> { - self.with_head(|s| Ok(s.beacon_block.clone())) - } - - /// Returns the beacon state at the head of the canonical chain. - /// - /// See `Self::head` for more information. - pub fn head_beacon_state(&self) -> Result, Error> { - self.with_head(|s| Ok(s.beacon_state.clone())) - } - /// Return the sync committee at `slot + 1` from the canonical chain. /// /// This is useful when dealing with sync committee messages, because messages are signed @@ -1168,42 +1052,6 @@ impl BeaconChain { self.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots) } - /// Returns info representing the head block and state. - /// - /// A summarized version of `Self::head` that involves less cloning. - pub fn head_info(&self) -> Result { - self.with_head(|head| { - let proposer_shuffling_decision_root = head - .beacon_state - .proposer_shuffling_decision_root(head.beacon_block_root)?; - - // The `random` value is used whilst producing an `ExecutionPayload` atop the head. - let current_epoch = head.beacon_state.current_epoch(); - let random = *head.beacon_state.get_randao_mix(current_epoch)?; - - Ok(HeadInfo { - slot: head.beacon_block.slot(), - block_root: head.beacon_block_root, - state_root: head.beacon_state_root(), - current_justified_checkpoint: head.beacon_state.current_justified_checkpoint(), - finalized_checkpoint: head.beacon_state.finalized_checkpoint(), - fork: head.beacon_state.fork(), - genesis_time: head.beacon_state.genesis_time(), - genesis_validators_root: head.beacon_state.genesis_validators_root(), - proposer_shuffling_decision_root, - is_merge_transition_complete: is_merge_transition_complete(&head.beacon_state), - execution_payload_block_hash: head - .beacon_block - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()), - random, - }) - }) - } - /// Returns the current heads of the `BeaconChain`. For the canonical head, see `Self::head`. /// /// Returns `(block_root, block_slot)`. 
@@ -1224,7 +1072,7 @@ impl BeaconChain { slot: Slot, config: StateSkipConfig, ) -> Result, Error> { - let head_state = self.head()?.beacon_state; + let head_state = self.head_beacon_state_cloned(); match slot.cmp(&head_state.slot()) { Ordering::Equal => Ok(head_state), @@ -1309,14 +1157,6 @@ impl BeaconChain { self.state_at_slot(self.slot()?, StateSkipConfig::WithStateRoots) } - /// Returns the slot of the highest block in the canonical chain. - pub fn best_slot(&self) -> Result { - self.canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .map(|head| head.beacon_block.slot()) - .ok_or(Error::CanonicalHeadLockTimeout) - } - /// Returns the validator index (if any) for the given public key. /// /// ## Notes @@ -1456,18 +1296,29 @@ impl BeaconChain { validator_indices: &[u64], epoch: Epoch, head_block_root: Hash256, - ) -> Result<(Vec>, Hash256), Error> { - self.with_committee_cache(head_block_root, epoch, |committee_cache, dependent_root| { - let duties = validator_indices - .iter() - .map(|validator_index| { - let validator_index = *validator_index as usize; - committee_cache.get_attestation_duties(validator_index) - }) - .collect(); + ) -> Result<(Vec>, Hash256, ExecutionStatus), Error> { + let execution_status = self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::AttestationHeadNotInForkChoice(head_block_root))?; - Ok((duties, dependent_root)) - }) + let (duties, dependent_root) = self.with_committee_cache( + head_block_root, + epoch, + |committee_cache, dependent_root| { + let duties = validator_indices + .iter() + .map(|validator_index| { + let validator_index = *validator_index as usize; + committee_cache.get_attestation_duties(validator_index) + }) + .collect(); + + Ok((duties, dependent_root)) + }, + )?; + Ok((duties, dependent_root, execution_status)) } /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. 
@@ -1514,8 +1365,8 @@ impl BeaconChain { ) -> Result, Error> { let beacon_block_root = attestation.data.beacon_block_root; match self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block_execution_status(&beacon_block_root) { // The attestation references a block that is not in fork choice, it must be @@ -1536,10 +1387,41 @@ impl BeaconChain { pub fn get_aggregated_sync_committee_contribution( &self, sync_contribution_data: &SyncContributionData, - ) -> Option> { - self.naive_sync_aggregation_pool + ) -> Result>, Error> { + if let Some(contribution) = self + .naive_sync_aggregation_pool .read() .get(sync_contribution_data) + { + self.filter_optimistic_sync_committee_contribution(contribution) + .map(Option::Some) + } else { + Ok(None) + } + } + + fn filter_optimistic_sync_committee_contribution( + &self, + contribution: SyncCommitteeContribution, + ) -> Result, Error> { + let beacon_block_root = contribution.beacon_block_root; + match self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&beacon_block_root) + { + // The contribution references a block that is not in fork choice, it must be + // pre-finalization. + None => Err(Error::SyncContributionDataReferencesFinalizedBlock { beacon_block_root }), + // The contribution references a fully valid `beacon_block_root`. + Some(execution_status) if execution_status.is_valid_or_irrelevant() => Ok(contribution), + // The contribution references a block that has not been verified by an EL (i.e. it + // is optimistic or invalid). Don't return the block, return an error instead. + Some(execution_status) => Err(Error::HeadBlockNotFullyVerified { + beacon_block_root, + execution_status, + }), + } } /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`. 
@@ -1603,7 +1485,10 @@ impl BeaconChain { let current_epoch_attesting_info: Option<(Checkpoint, usize)>; let attester_cache_key; let head_timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_HEAD_SCRAPE_SECONDS); - if let Some(head) = self.canonical_head.try_read_for(HEAD_LOCK_TIMEOUT) { + // The following braces are to prevent the `cached_head` Arc from being held for longer than + // required. It also helps reduce the diff for a very large PR (#3244). + { + let head = self.head_snapshot(); let head_state = &head.beacon_state; head_state_slot = head_state.slot(); @@ -1678,15 +1563,13 @@ impl BeaconChain { // routine. attester_cache_key = AttesterCacheKey::new(request_epoch, head_state, beacon_block_root)?; - } else { - return Err(Error::CanonicalHeadLockTimeout); } drop(head_timer); // Only attest to a block if it is fully verified (i.e. not optimistic or invalid). match self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block_execution_status(&beacon_block_root) { Some(execution_status) if execution_status.is_valid_or_irrelevant() => (), @@ -1890,12 +1773,13 @@ impl BeaconChain { ) -> Result<(), Error> { let _timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); - self.fork_choice - .write() + self.canonical_head + .fork_choice_write_lock() .on_attestation( self.slot()?, verified.indexed_attestation(), AttestationFromBlock::False, + &self.spec, ) .map_err(Into::into) } @@ -2017,26 +1901,22 @@ impl BeaconChain { /// Accepts a `VerifiedAttestation` and attempts to apply it to `self.op_pool`. /// /// The op pool is used by local block producers to pack blocks with operations. 
- pub fn add_to_block_inclusion_pool( + pub fn add_to_block_inclusion_pool( &self, - verified_attestation: &impl VerifiedAttestation, - ) -> Result<(), AttestationError> { + verified_attestation: A, + ) -> Result<(), AttestationError> + where + A: VerifiedAttestation, + { let _timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_APPLY_TO_OP_POOL); // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. if self.eth1_chain.is_some() { - let fork = - self.with_head(|head| Ok::<_, AttestationError>(head.beacon_state.fork()))?; - + let (attestation, attesting_indices) = + verified_attestation.into_attestation_and_indices(); self.op_pool - .insert_attestation( - // TODO: address this clone. - verified_attestation.attestation().clone(), - &fork, - self.genesis_validators_root, - &self.spec, - ) + .insert_attestation(attestation, attesting_indices) .map_err(Error::from)?; } @@ -2069,15 +1949,15 @@ impl BeaconChain { pub fn filter_op_pool_attestation( &self, filter_cache: &mut HashMap<(Hash256, Epoch), bool>, - att: &Attestation, + att: &AttestationRef, state: &BeaconState, ) -> bool { *filter_cache - .entry((att.data.beacon_block_root, att.data.target.epoch)) + .entry((att.data.beacon_block_root, att.checkpoint.target_epoch)) .or_insert_with(|| { self.shuffling_is_compatible( &att.data.beacon_block_root, - att.data.target.epoch, + att.checkpoint.target_epoch, state, ) }) @@ -2132,7 +2012,7 @@ impl BeaconChain { // pivot block is the same as the current state's pivot block. If it is, then the // attestation's shuffling is the same as the current state's. // To account for skipped slots, find the first block at *or before* the pivot slot. 
- let fork_choice_lock = self.fork_choice.read(); + let fork_choice_lock = self.canonical_head.fork_choice_read_lock(); let pivot_block_root = fork_choice_lock .proto_array() .core_proto_array() @@ -2159,7 +2039,7 @@ impl BeaconChain { pub fn verify_voluntary_exit_for_gossip( &self, exit: SignedVoluntaryExit, - ) -> Result, Error> { + ) -> Result, Error> { // NOTE: this could be more efficient if it avoided cloning the head state let wall_clock_state = self.wall_clock_state()?; Ok(self @@ -2180,7 +2060,7 @@ impl BeaconChain { } /// Accept a pre-verified exit and queue it for inclusion in an appropriate block. - pub fn import_voluntary_exit(&self, exit: SigVerifiedOp) { + pub fn import_voluntary_exit(&self, exit: SigVerifiedOp) { if self.eth1_chain.is_some() { self.op_pool.insert_voluntary_exit(exit) } @@ -2190,7 +2070,7 @@ impl BeaconChain { pub fn verify_proposer_slashing_for_gossip( &self, proposer_slashing: ProposerSlashing, - ) -> Result, Error> { + ) -> Result, Error> { let wall_clock_state = self.wall_clock_state()?; Ok(self.observed_proposer_slashings.lock().verify_and_observe( proposer_slashing, @@ -2200,7 +2080,10 @@ impl BeaconChain { } /// Accept some proposer slashing and queue it for inclusion in an appropriate block. - pub fn import_proposer_slashing(&self, proposer_slashing: SigVerifiedOp) { + pub fn import_proposer_slashing( + &self, + proposer_slashing: SigVerifiedOp, + ) { if self.eth1_chain.is_some() { self.op_pool.insert_proposer_slashing(proposer_slashing) } @@ -2210,7 +2093,7 @@ impl BeaconChain { pub fn verify_attester_slashing_for_gossip( &self, attester_slashing: AttesterSlashing, - ) -> Result>, Error> { + ) -> Result, T::EthSpec>, Error> { let wall_clock_state = self.wall_clock_state()?; Ok(self.observed_attester_slashings.lock().verify_and_observe( attester_slashing, @@ -2219,16 +2102,23 @@ impl BeaconChain { )?) } - /// Accept some attester slashing and queue it for inclusion in an appropriate block. 
+ /// Accept a verified attester slashing and: + /// + /// 1. Apply it to fork choice. + /// 2. Add it to the op pool. pub fn import_attester_slashing( &self, - attester_slashing: SigVerifiedOp>, - ) -> Result<(), Error> { + attester_slashing: SigVerifiedOp, T::EthSpec>, + ) { + // Add to fork choice. + self.canonical_head + .fork_choice_write_lock() + .on_attester_slashing(attester_slashing.as_inner()); + + // Add to the op pool (if we have the ability to propose blocks). if self.eth1_chain.is_some() { - self.op_pool - .insert_attester_slashing(attester_slashing, self.head_info()?.fork) + self.op_pool.insert_attester_slashing(attester_slashing) } - Ok(()) } /// Attempt to obtain sync committee duties from the head. @@ -2244,22 +2134,36 @@ impl BeaconChain { }) } - /// Attempt to verify and import a chain of blocks to `self`. + /// A convenience method for spawning a blocking task. It maps an `Option` and + /// `tokio::JoinError` into a single `BeaconChainError`. + pub(crate) async fn spawn_blocking_handle( + &self, + task: F, + name: &'static str, + ) -> Result + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let handle = self + .task_executor + .spawn_blocking_handle(task, name) + .ok_or(Error::RuntimeShutdown)?; + + handle.await.map_err(Error::TokioJoin) + } + + /// Accepts a `chain_segment` and filters out any uninteresting blocks (e.g., pre-finalization + /// or already-known). /// - /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., - /// be a chain). An error will be returned if this is not the case. - /// - /// This operation is not atomic; if one of the blocks in the chain is invalid then some prior - /// blocks might be imported. - /// - /// This method is generally much more efficient than importing each block using - /// `Self::process_block`. - pub fn process_chain_segment( + /// This method is potentially long-running and should not run on the core executor. 
+ pub fn filter_chain_segment( self: &Arc, - chain_segment: Vec>, - ) -> ChainSegmentResult { + chain_segment: Vec>>, + ) -> Result>, ChainSegmentResult> { + // This function will never import any blocks. + let imported_blocks = 0; let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len()); - let mut imported_blocks = 0; // Produce a list of the parent root and slot of the child of each block. // @@ -2273,10 +2177,10 @@ impl BeaconChain { for (i, block) in chain_segment.into_iter().enumerate() { // Ensure the block is the correct structure for the fork at `block.slot()`. if let Err(e) = block.fork_name(&self.spec) { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::InconsistentFork(e), - }; + }); } let block_root = get_block_root(&block); @@ -2288,18 +2192,18 @@ impl BeaconChain { // Without this check it would be possible to have a block verified using the // incorrect shuffling. That would be bad, mmkay. if block_root != *child_parent_root { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearParentRoots, - }; + }); } // Ensure that the slots are strictly increasing throughout the chain segment. if *child_slot <= block.slot() { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NonLinearSlots, - }; + }); } } @@ -2327,18 +2231,18 @@ impl BeaconChain { // The block has a known parent that does not descend from the finalized block. // There is no need to process this block or any children. Err(BlockError::NotFinalizedDescendant { block_parent_root }) => { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::NotFinalizedDescendant { block_parent_root }, - }; + }); } // If there was an error whilst determining if the block was invalid, return that // error. 
Err(BlockError::BeaconChainError(e)) => { - return ChainSegmentResult::Failed { + return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::BeaconChainError(e), - }; + }); } // If the block was decided to be irrelevant for any other reason, don't include // this block or any of it's children in the filtered chain segment. @@ -2346,12 +2250,51 @@ impl BeaconChain { } } + Ok(filtered_chain_segment) + } + + /// Attempt to verify and import a chain of blocks to `self`. + /// + /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., + /// be a chain). An error will be returned if this is not the case. + /// + /// This operation is not atomic; if one of the blocks in the chain is invalid then some prior + /// blocks might be imported. + /// + /// This method is generally much more efficient than importing each block using + /// `Self::process_block`. + pub async fn process_chain_segment( + self: &Arc, + chain_segment: Vec>>, + count_unrealized: CountUnrealized, + ) -> ChainSegmentResult { + let mut imported_blocks = 0; + + // Filter uninteresting blocks from the chain segment in a blocking task. + let chain = self.clone(); + let filtered_chain_segment_future = self.spawn_blocking_handle( + move || chain.filter_chain_segment(chain_segment), + "filter_chain_segment", + ); + let mut filtered_chain_segment = match filtered_chain_segment_future.await { + Ok(Ok(filtered_segment)) => filtered_segment, + Ok(Err(segment_result)) => return segment_result, + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::BeaconChainError(error), + } + } + }; + while let Some((_root, block)) = filtered_chain_segment.first() { // Determine the epoch of the first block in the remaining segment. let start_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - // The `last_index` indicates the position of the last block that is in the current - // epoch of `start_epoch`. 
+ // The `last_index` indicates the position of the first block in an epoch greater + // than the current epoch: partitioning the blocks into a run of blocks in the same + // epoch and everything else. These same-epoch blocks can all be signature-verified with + // the same `BeaconState`. let last_index = filtered_chain_segment .iter() .position(|(_root, block)| { @@ -2359,26 +2302,38 @@ impl BeaconChain { }) .unwrap_or(filtered_chain_segment.len()); - // Split off the first section blocks that are all either within the current epoch of - // the first block. These blocks can all be signature-verified with the same - // `BeaconState`. let mut blocks = filtered_chain_segment.split_off(last_index); std::mem::swap(&mut blocks, &mut filtered_chain_segment); + let chain = self.clone(); + let signature_verification_future = self.spawn_blocking_handle( + move || signature_verify_chain_segment(blocks, &chain), + "signature_verify_chain_segment", + ); + // Verify the signature of the blocks, returning early if the signature is invalid. - let signature_verified_blocks = match signature_verify_chain_segment(blocks, self) { - Ok(blocks) => blocks, - Err(error) => { + let signature_verified_blocks = match signature_verification_future.await { + Ok(Ok(blocks)) => blocks, + Ok(Err(error)) => { return ChainSegmentResult::Failed { imported_blocks, error, }; } + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::BeaconChainError(error), + }; + } }; // Import the blocks into the chain. 
for signature_verified_block in signature_verified_blocks { - match self.process_block(signature_verified_block) { + match self + .process_block(signature_verified_block, count_unrealized) + .await + { Ok(_) => imported_blocks += 1, Err(error) => { return ChainSegmentResult::Failed { @@ -2403,43 +2358,54 @@ impl BeaconChain { /// ## Errors /// /// Returns an `Err` if the given block was invalid, or an error was encountered during - pub fn verify_block_for_gossip( - &self, - block: SignedBeaconBlock, + pub async fn verify_block_for_gossip( + self: &Arc, + block: Arc>, ) -> Result, BlockError> { - let slot = block.slot(); - let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); + let chain = self.clone(); + self.task_executor + .clone() + .spawn_blocking_handle( + move || { + let slot = block.slot(); + let graffiti_string = block.message().body().graffiti().as_utf8_lossy(); - match GossipVerifiedBlock::new(block, self) { - Ok(verified) => { - debug!( - self.log, - "Successfully processed gossip block"; - "graffiti" => graffiti_string, - "slot" => slot, - "root" => ?verified.block_root(), - ); + match GossipVerifiedBlock::new(block, &chain) { + Ok(verified) => { + debug!( + chain.log, + "Successfully processed gossip block"; + "graffiti" => graffiti_string, + "slot" => slot, + "root" => ?verified.block_root(), + ); - Ok(verified) - } - Err(e) => { - debug!( - self.log, - "Rejected gossip block"; - "error" => e.to_string(), - "graffiti" => graffiti_string, - "slot" => slot, - ); + Ok(verified) + } + Err(e) => { + debug!( + chain.log, + "Rejected gossip block"; + "error" => e.to_string(), + "graffiti" => graffiti_string, + "slot" => slot, + ); - Err(e) - } - } + Err(e) + } + } + }, + "payload_verification_handle", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::TokioJoin)? } /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and /// imported into the chain. 
/// - /// Items that implement `IntoFullyVerifiedBlock` include: + /// Items that implement `IntoExecutionPendingBlock` include: /// /// - `SignedBeaconBlock` /// - `GossipVerifiedBlock` @@ -2448,9 +2414,10 @@ impl BeaconChain { /// /// Returns an `Err` if the given block was invalid, or an error was encountered during /// verification. - pub fn process_block>( + pub async fn process_block>( self: &Arc, unverified_block: B, + count_unrealized: CountUnrealized, ) -> Result> { // Start the Prometheus timer. let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); @@ -2462,13 +2429,16 @@ impl BeaconChain { let block = unverified_block.block().clone(); // A small closure to group the verification and import errors. - let import_block = |unverified_block: B| -> Result> { - let fully_verified = unverified_block.into_fully_verified_block(self)?; - self.import_block(fully_verified) + let chain = self.clone(); + let import_block = async move { + let execution_pending = unverified_block.into_execution_pending_block(&chain)?; + chain + .import_execution_pending_block(execution_pending, count_unrealized) + .await }; // Verify and import the block. - match import_block(unverified_block) { + match import_block.await { // The block was successfully verified and imported. Yay. Ok(block_root) => { trace!( @@ -2483,6 +2453,14 @@ impl BeaconChain { Ok(block_root) } + Err(e @ BlockError::BeaconChainError(BeaconChainError::TokioJoin(_))) => { + debug!( + self.log, + "Beacon block processing cancelled"; + "error" => ?e, + ); + Err(e) + } // There was an error whilst attempting to verify and import the block. The block might // be partially verified or partially imported. Err(BlockError::BeaconChainError(e)) => { @@ -2505,6 +2483,83 @@ impl BeaconChain { } } + /// Accepts a fully-verified block and imports it into the chain without performing any + /// additional verification. + /// + /// An error is returned if the block was unable to be imported. 
It may be partially imported + /// (i.e., this function is not atomic). + async fn import_execution_pending_block( + self: Arc, + execution_pending_block: ExecutionPendingBlock, + count_unrealized: CountUnrealized, + ) -> Result> { + let ExecutionPendingBlock { + block, + block_root, + state, + parent_block: _, + confirmed_state_roots, + payload_verification_handle, + } = execution_pending_block; + + let PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + } = payload_verification_handle + .await + .map_err(BeaconChainError::TokioJoin)? + .ok_or(BeaconChainError::RuntimeShutdown)??; + + // Log the PoS pandas if a merge transition just occurred. + if is_valid_merge_transition_block { + info!(self.log, "{}", POS_PANDA_BANNER); + info!( + self.log, + "Proof of Stake Activated"; + "slot" => block.slot() + ); + info!( + self.log, ""; + "Terminal POW Block Hash" => ?block + .message() + .execution_payload()? + .parent_hash() + .into_root() + ); + info!( + self.log, ""; + "Merge Transition Block Root" => ?block.message().tree_hash_root() + ); + info!( + self.log, ""; + "Merge Transition Execution Hash" => ?block + .message() + .execution_payload()? + .block_hash() + .into_root() + ); + } + + let chain = self.clone(); + let block_hash = self + .spawn_blocking_handle( + move || { + chain.import_block( + block, + block_root, + state, + confirmed_state_roots, + payload_verification_status, + count_unrealized, + ) + }, + "payload_verification_handle", + ) + .await??; + + Ok(block_hash) + } + /// Accepts a fully-verified block and imports it into the chain without performing any /// additional verification. /// @@ -2512,15 +2567,15 @@ impl BeaconChain { /// (i.e., this function is not atomic). 
fn import_block( &self, - fully_verified_block: FullyVerifiedBlock, + signed_block: Arc>, + block_root: Hash256, + mut state: BeaconState, + confirmed_state_roots: Vec, + payload_verification_status: PayloadVerificationStatus, + count_unrealized: CountUnrealized, ) -> Result> { - let signed_block = fully_verified_block.block; - let block_root = fully_verified_block.block_root; - let mut state = fully_verified_block.state; let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let mut ops = fully_verified_block.confirmation_db_batch; - let payload_verification_status = fully_verified_block.payload_verification_status; let attestation_observation_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION); @@ -2593,21 +2648,24 @@ impl BeaconChain { .map_err(BeaconChainError::from)?; } - let mut fork_choice = self.fork_choice.write(); - - // Do not import a block that doesn't descend from the finalized root. - let signed_block = - check_block_is_finalized_descendant::(signed_block, &fork_choice, &self.store)?; - let (block, _) = signed_block.clone().deconstruct(); - - // compare the existing finalized checkpoint with the incoming block's finalized checkpoint - let old_finalized_checkpoint = fork_choice.finalized_checkpoint(); - let new_finalized_checkpoint = state.finalized_checkpoint(); + // Alias for readability. + let block = signed_block.message(); // Only perform the weak subjectivity check if it was configured. if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint { + // Note: we're using the finalized checkpoint from the head state, rather than fork + // choice. + // + // We are doing this to ensure that we detect changes in finalization. It's possible + // that fork choice has already been updated to the finalized checkpoint in the block + // we're importing. 
+ let current_head_finalized_checkpoint = + self.canonical_head.cached_head().finalized_checkpoint(); + // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint. + let new_finalized_checkpoint = state.finalized_checkpoint(); + // This ensures we only perform the check once. - if (old_finalized_checkpoint.epoch < wss_checkpoint.epoch) + if (current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch) && (wss_checkpoint.epoch <= new_finalized_checkpoint.epoch) { if let Err(e) = @@ -2619,7 +2677,7 @@ impl BeaconChain { "Weak subjectivity checkpoint verification failed while importing block!"; "block_root" => ?block_root, "parent_root" => ?block.parent_root(), - "old_finalized_epoch" => ?old_finalized_checkpoint.epoch, + "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch, "new_finalized_epoch" => ?new_finalized_checkpoint.epoch, "weak_subjectivity_epoch" => ?wss_checkpoint.epoch, "error" => ?e, @@ -2635,6 +2693,13 @@ impl BeaconChain { } } + // Take an exclusive write-lock on fork choice. It's very important prevent deadlocks by + // avoiding taking other locks whilst holding this lock. + let mut fork_choice = self.canonical_head.fork_choice_write_lock(); + + // Do not import a block that doesn't descend from the finalized root. + check_block_is_finalized_descendant(self, &fork_choice, &signed_block)?; + // Register the new block with the fork choice service. 
{ let _fork_choice_block_timer = @@ -2647,12 +2712,13 @@ impl BeaconChain { fork_choice .on_block( current_slot, - &block, + block, block_root, block_delay, &state, payload_verification_status, &self.spec, + count_unrealized.and(self.config.count_unrealized.into()), ) .map_err(|e| BlockError::BeaconChainError(e.into()))?; } @@ -2663,6 +2729,11 @@ impl BeaconChain { .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), &state); let validator_monitor = self.validator_monitor.read(); + // Register each attester slashing in the block with fork choice. + for attester_slashing in block.body().attester_slashings() { + fork_choice.on_attester_slashing(attester_slashing); + } + // Register each attestation in the block with the fork choice service. for attestation in block.body().attestations() { let _fork_choice_attestation_timer = @@ -2678,6 +2749,7 @@ impl BeaconChain { current_slot, &indexed_attestation, AttestationFromBlock::True, + &self.spec, ) { Ok(()) => Ok(()), // Ignore invalid attestations whilst importing attestations from a block. The @@ -2736,39 +2808,45 @@ impl BeaconChain { if !payload_verification_status.is_optimistic() && block.slot() + EARLY_ATTESTER_CACHE_HISTORIC_SLOTS >= current_slot { - let new_head_root = fork_choice - .get_head(current_slot, &self.spec) - .map_err(BeaconChainError::from)?; - - if new_head_root == block_root { - if let Some(proto_block) = fork_choice.get_block(&block_root) { - if let Err(e) = self.early_attester_cache.add_head_block( - block_root, - signed_block.clone(), - proto_block, - &state, - &self.spec, - ) { + match fork_choice.get_head(current_slot, &self.spec) { + // This block became the head, add it to the early attester cache. 
+ Ok(new_head_root) if new_head_root == block_root => { + if let Some(proto_block) = fork_choice.get_block(&block_root) { + if let Err(e) = self.early_attester_cache.add_head_block( + block_root, + signed_block.clone(), + proto_block, + &state, + &self.spec, + ) { + warn!( + self.log, + "Early attester cache insert failed"; + "error" => ?e + ); + } else { + // Success, record the block as capable of being attested to. + self.block_times_cache.write().set_time_attestable( + block_root, + block.slot(), + timestamp_now(), + ); + } + } else { warn!( self.log, - "Early attester cache insert failed"; - "error" => ?e - ); - } else { - // Success, record the block as capable of being attested to. - self.block_times_cache.write().set_time_attestable( - block_root, - block.slot(), - timestamp_now(), + "Early attester block missing"; + "block_root" => ?block_root ); } - } else { - warn!( - self.log, - "Early attester block missing"; - "block_root" => ?block_root - ); } + // This block did not become the head, nothing to do. + Ok(_) => (), + Err(e) => error!( + self.log, + "Failed to compute head during block import"; + "error" => ?e + ), } } @@ -2829,7 +2907,11 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - ops.push(StoreOp::PutBlock(block_root, Box::new(signed_block))); + let mut ops: Vec<_> = confirmed_state_roots + .into_iter() + .map(StoreOp::DeleteStateTemporaryFlag) + .collect(); + ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); let txn_lock = self.store.hot_db.begin_rw_transaction(); @@ -2840,18 +2922,29 @@ impl BeaconChain { "msg" => "Restoring fork choice from disk", "error" => ?e, ); - match Self::load_fork_choice(self.store.clone())? 
{ - Some(persisted_fork_choice) => { - *fork_choice = persisted_fork_choice; - } - None => { - crit!( - self.log, - "No stored fork choice found to restore from"; - "warning" => "The database is likely corrupt now, consider --purge-db" - ); - } + + // Since the write failed, try to revert the canonical head back to what was stored + // in the database. This attempts to prevent inconsistency between the database and + // fork choice. + if let Err(e) = self.canonical_head.restore_from_store( + fork_choice, + ResetPayloadStatuses::always_reset_conditionally( + self.config.always_reset_payload_statuses, + ), + self.config.count_unrealized_full, + &self.store, + &self.spec, + &self.log, + ) { + crit!( + self.log, + "No stored fork choice found to restore from"; + "error" => ?e, + "warning" => "The database is likely corrupt now, consider --purge-db" + ); + return Err(BlockError::BeaconChainError(e)); } + return Err(e.into()); } drop(txn_lock); @@ -2876,6 +2969,7 @@ impl BeaconChain { event_handler.register(EventKind::Block(SseBlock { slot, block: block_root, + execution_optimistic: payload_verification_status.is_optimistic(), })); } } @@ -2960,7 +3054,7 @@ impl BeaconChain { /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. - pub fn produce_block>( + pub async fn produce_block>( self: &Arc, randao_reveal: Signature, slot: Slot, @@ -2972,16 +3066,51 @@ impl BeaconChain { validator_graffiti, ProduceBlockVerification::VerifyRandao, ) + .await } /// Same as `produce_block` but allowing for configuration of RANDAO-verification. 
- pub fn produce_block_with_verification>( + pub async fn produce_block_with_verification>( self: &Arc, randao_reveal: Signature, slot: Slot, validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { + // Part 1/2 (blocking) + // + // Load the parent state from disk. + let chain = self.clone(); + let (state, state_root_opt) = self + .task_executor + .spawn_blocking_handle( + move || chain.load_state_for_block_production::(slot), + "produce_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/2 (async, with some blocking components) + // + // Produce the block upon the state + self.produce_block_on_state::( + state, + state_root_opt, + slot, + randao_reveal, + validator_graffiti, + verification, + ) + .await + } + + /// Load a beacon state from the database for block production. This is a long-running process + /// that should not be performed in an `async` context. + fn load_state_for_block_production>( + self: &Arc, + slot: Slot, + ) -> Result<(BeaconState, Option), BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); @@ -2990,15 +3119,23 @@ impl BeaconChain { drop(fork_choice_timer); let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); - let head_info = self - .head_info() - .map_err(BlockProductionError::UnableToGetHeadInfo)?; - let (state, state_root_opt) = if head_info.slot <= slot { + + // Atomically read some values from the head whilst avoiding holding cached head `Arc` any + // longer than necessary. 
+ let (head_slot, head_block_root, head_state_root) = { + let head = self.canonical_head.cached_head(); + ( + head.head_slot(), + head.head_block_root(), + head.head_state_root(), + ) + }; + let (state, state_root_opt) = if head_slot <= slot { // Fetch the head state advanced through to `slot`, which should be present in the state // cache thanks to the state advance timer. let (state_root, state) = self .store - .get_advanced_state(head_info.block_root, slot, head_info.state_root) + .get_advanced_state(head_block_root, slot, head_state_root) .map_err(BlockProductionError::FailedToLoadState)? .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; (state, Some(state_root)) @@ -3015,16 +3152,10 @@ impl BeaconChain { (state, None) }; + drop(state_load_timer); - self.produce_block_on_state::( - state, - state_root_opt, - slot, - randao_reveal, - validator_graffiti, - verification, - ) + Ok((state, state_root_opt)) } /// Produce a block for some `slot` upon the given `state`. @@ -3039,15 +3170,79 @@ impl BeaconChain { /// The provided `state_root_opt` should only ever be set to `Some` if the contained value is /// equal to the root of `state`. Providing this value will serve as an optimization to avoid /// performing a tree hash in some scenarios. - pub fn produce_block_on_state>( - &self, - mut state: BeaconState, + pub async fn produce_block_on_state>( + self: &Arc, + state: BeaconState, state_root_opt: Option, produce_at_slot: Slot, randao_reveal: Signature, validator_graffiti: Option, verification: ProduceBlockVerification, ) -> Result, BlockProductionError> { + // Part 1/3 (blocking) + // + // Perform the state advance and block-packing functions. 
+ let chain = self.clone(); + let mut partial_beacon_block = self + .task_executor + .spawn_blocking_handle( + move || { + chain.produce_partial_beacon_block( + state, + state_root_opt, + produce_at_slot, + randao_reveal, + validator_graffiti, + ) + }, + "produce_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/3 (async) + // + // Wait for the execution layer to return an execution payload (if one is required). + let prepare_payload_handle = partial_beacon_block.prepare_payload_handle.take(); + let execution_payload = if let Some(prepare_payload_handle) = prepare_payload_handle { + let execution_payload = prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? + .ok_or(BlockProductionError::ShuttingDown)??; + Some(execution_payload) + } else { + None + }; + + // Part 3/3 (blocking) + // + // Perform the final steps of combining all the parts and computing the state root. + let chain = self.clone(); + self.task_executor + .spawn_blocking_handle( + move || { + chain.complete_partial_beacon_block( + partial_beacon_block, + execution_payload, + verification, + ) + }, + "complete_partial_beacon_block", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)? + } + + fn produce_partial_beacon_block>( + self: &Arc, + mut state: BeaconState, + state_root_opt: Option, + produce_at_slot: Slot, + randao_reveal: Signature, + validator_graffiti: Option, + ) -> Result, BlockProductionError> { let eth1_chain = self .eth1_chain .as_ref() @@ -3079,25 +3274,52 @@ impl BeaconChain { state.latest_block_header().canonical_root() }; - let (proposer_slashings, attester_slashings, voluntary_exits) = + let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? 
as u64; + + let pubkey = state + .validators() + .get(proposer_index as usize) + .map(|v| *v.pubkey()) + .ok_or(BlockProductionError::BeaconChain( + BeaconChainError::ValidatorIndexUnknown(proposer_index as usize), + ))?; + + let builder_params = BuilderParams { + pubkey, + slot: state.slot(), + chain_health: self + .is_healthy(&parent_root) + .map_err(BlockProductionError::BeaconChain)?, + }; + + // If required, start the process of loading an execution payload from the EL early. This + // allows it to run concurrently with things like attestation packing. + let prepare_payload_handle = match &state { + BeaconState::Base(_) | BeaconState::Altair(_) => None, + BeaconState::Merge(_) => { + let prepare_payload_handle = + get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; + Some(prepare_payload_handle) + } + }; + + let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) = self.op_pool.get_slashings_and_exits(&state, &self.spec); let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; - let deposits = eth1_chain - .deposits_for_block_inclusion(&state, ð1_data, &self.spec)? - .into(); + let deposits = eth1_chain.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?; // Iterate through the naive aggregation pool and ensure all the attestations from there // are included in the operation pool. 
let unagg_import_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_UNAGGREGATED_TIMES); for attestation in self.naive_aggregation_pool.read().iter() { - if let Err(e) = self.op_pool.insert_attestation( - attestation.clone(), - &state.fork(), - state.genesis_validators_root(), - &self.spec, - ) { + let import = |attestation: &Attestation| { + let attesting_indices = get_attesting_indices_from_state(&state, attestation)?; + self.op_pool + .insert_attestation(attestation.clone(), attesting_indices) + }; + if let Err(e) = import(attestation) { // Don't stop block production if there's an error, just create a log. error!( self.log, @@ -3118,15 +3340,15 @@ impl BeaconChain { metrics::start_timer(&metrics::BLOCK_PRODUCTION_ATTESTATION_TIMES); let mut prev_filter_cache = HashMap::new(); - let prev_attestation_filter = |att: &&Attestation| { - self.filter_op_pool_attestation(&mut prev_filter_cache, *att, &state) + let prev_attestation_filter = |att: &AttestationRef| { + self.filter_op_pool_attestation(&mut prev_filter_cache, att, &state) }; let mut curr_filter_cache = HashMap::new(); - let curr_attestation_filter = |att: &&Attestation| { - self.filter_op_pool_attestation(&mut curr_filter_cache, *att, &state) + let curr_attestation_filter = |att: &AttestationRef| { + self.filter_op_pool_attestation(&mut curr_filter_cache, att, &state) }; - let attestations = self + let mut attestations = self .op_pool .get_attestations( &state, @@ -3134,16 +3356,87 @@ impl BeaconChain { curr_attestation_filter, &self.spec, ) - .map_err(BlockProductionError::OpPoolError)? - .into(); + .map_err(BlockProductionError::OpPoolError)?; drop(attestation_packing_timer); + // If paranoid mode is enabled re-check the signatures of every included message. + // This will be a lot slower but guards against bugs in block production and can be + // quickly rolled out without a release. 
+ if self.config.paranoid_block_proposal {
+ attestations.retain(|att| {
+ verify_attestation_for_block_inclusion(
+ &state,
+ att,
+ VerifySignatures::True,
+ &self.spec,
+ )
+ .map_err(|e| {
+ warn!(
+ self.log,
+ "Attempted to include an invalid attestation";
+ "err" => ?e,
+ "block_slot" => state.slot(),
+ "attestation" => ?att
+ );
+ })
+ .is_ok()
+ });
+
+ proposer_slashings.retain(|slashing| {
+ slashing
+ .clone()
+ .validate(&state, &self.spec)
+ .map_err(|e| {
+ warn!(
+ self.log,
+ "Attempted to include an invalid proposer slashing";
+ "err" => ?e,
+ "block_slot" => state.slot(),
+ "slashing" => ?slashing
+ );
+ })
+ .is_ok()
+ });
+
+ attester_slashings.retain(|slashing| {
+ slashing
+ .clone()
+ .validate(&state, &self.spec)
+ .map_err(|e| {
+ warn!(
+ self.log,
+ "Attempted to include an invalid attester slashing";
+ "err" => ?e,
+ "block_slot" => state.slot(),
+ "slashing" => ?slashing
+ );
+ })
+ .is_ok()
+ });
+
+ voluntary_exits.retain(|exit| {
+ exit.clone()
+ .validate(&state, &self.spec)
+ .map_err(|e| {
+ warn!(
+ self.log,
+ "Attempted to include an invalid voluntary exit";
+ "err" => ?e,
+ "block_slot" => state.slot(),
+ "exit" => ?exit
+ );
+ })
+ .is_ok()
+ });
+ }
+
 let slot = state.slot();
 let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64;
- // Closure to fetch a sync aggregate in cases where it is required.
- let get_sync_aggregate = || -> Result, BlockProductionError> {
- Ok(self
+ let sync_aggregate = if matches!(&state, BeaconState::Base(_)) {
+ None
+ } else {
+ let sync_aggregate = self
+ .op_pool
+ .get_sync_aggregate(&state)
+ .map_err(BlockProductionError::OpPoolError)?
@@ -3154,9 +3447,54 @@ impl BeaconChain { "slot" => state.slot(), ); SyncAggregate::new() - })) + }); + Some(sync_aggregate) }; + Ok(PartialBeaconBlock { + state, + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + prepare_payload_handle, + }) + } + + fn complete_partial_beacon_block>( + &self, + partial_beacon_block: PartialBeaconBlock, + execution_payload: Option, + verification: ProduceBlockVerification, + ) -> Result, BlockProductionError> { + let PartialBeaconBlock { + mut state, + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + // We don't need the prepare payload handle since the `execution_payload` is passed into + // this function. We can assume that the handle has already been consumed in order to + // produce said `execution_payload`. 
+ prepare_payload_handle: _, + } = partial_beacon_block; + let inner_block = match &state { BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase { slot, @@ -3169,56 +3507,51 @@ impl BeaconChain { graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), - attestations, - deposits, + attestations: attestations.into(), + deposits: deposits.into(), voluntary_exits: voluntary_exits.into(), _phantom: PhantomData, }, }), - BeaconState::Altair(_) => { - let sync_aggregate = get_sync_aggregate()?; - BeaconBlock::Altair(BeaconBlockAltair { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyAltair { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations, - deposits, - voluntary_exits: voluntary_exits.into(), - sync_aggregate, - _phantom: PhantomData, - }, - }) - } - BeaconState::Merge(_) => { - let sync_aggregate = get_sync_aggregate()?; - let execution_payload = - get_execution_payload::(self, &state, proposer_index)?; - BeaconBlock::Merge(BeaconBlockMerge { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyMerge { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings.into(), - attestations, - deposits, - voluntary_exits: voluntary_exits.into(), - sync_aggregate, - execution_payload, - }, - }) - } + BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyAltair { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + 
.ok_or(BlockProductionError::MissingSyncAggregate)?, + _phantom: PhantomData, + }, + }), + BeaconState::Merge(_) => BeaconBlock::Merge(BeaconBlockMerge { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: attestations.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: execution_payload + .ok_or(BlockProductionError::MissingExecutionPayload)?, + }, + }), }; let block = SignedBeaconBlock::from_block( @@ -3283,19 +3616,36 @@ impl BeaconChain { /// results in the justified checkpoint being invalidated. /// /// See the documentation of `InvalidationOperation` for information about defining `op`. - pub fn process_invalid_execution_payload( + pub async fn process_invalid_execution_payload( self: &Arc, op: &InvalidationOperation, ) -> Result<(), Error> { debug!( self.log, - "Invalid execution payload in block"; - "latest_valid_ancestor" => ?op.latest_valid_ancestor(), - "block_root" => ?op.block_root(), + "Processing payload invalidation"; + "op" => ?op, ); + // Update the execution status in fork choice. + // + // Use a blocking task since it interacts with the `canonical_head` lock. Lock contention + // on the core executor is bad. + let chain = self.clone(); + let inner_op = op.clone(); + let fork_choice_result = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_write_lock() + .on_invalid_execution_payload(&inner_op) + }, + "invalid_payload_fork_choice_update", + ) + .await?; + // Update fork choice. 
- if let Err(e) = self.fork_choice.write().on_invalid_execution_payload(op) { + if let Err(e) = fork_choice_result { crit!( self.log, "Failed to process invalid payload"; @@ -3307,19 +3657,24 @@ impl BeaconChain { // Run fork choice since it's possible that the payload invalidation might result in a new // head. - // - // Don't return early though, since invalidating the justified checkpoint might cause an - // error here. - if let Err(e) = self.fork_choice() { - crit!( - self.log, - "Failed to run fork choice routine"; - "error" => ?e, - ); - } + self.recompute_head_at_current_slot().await; - // Atomically obtain the justified root from fork choice. - let justified_block = self.fork_choice.read().get_justified_block()?; + // Obtain the justified root from fork choice. + // + // Use a blocking task since it interacts with the `canonical_head` lock. Lock contention + // on the core executor is bad. + let chain = self.clone(); + let justified_block = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_read_lock() + .get_justified_block() + }, + "invalid_payload_fork_choice_get_justified", + ) + .await??; if justified_block.execution_status.is_invalid() { crit!( @@ -3351,461 +3706,10 @@ impl BeaconChain { Ok(()) } - /// Execute the fork choice algorithm and enthrone the result as the canonical head. - pub fn fork_choice(self: &Arc) -> Result<(), Error> { - self.fork_choice_at_slot(self.slot()?) - } - - /// Execute fork choice at `slot`, processing queued attestations from `slot - 1` and earlier. - /// - /// The `slot` is not verified in any way, callers should ensure it corresponds to at most - /// one slot ahead of the current wall-clock slot. 
- pub fn fork_choice_at_slot(self: &Arc, slot: Slot) -> Result<(), Error> { - metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); - let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); - - let result = self.fork_choice_internal(slot); - - if result.is_err() { - metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); - } - - result - } - - fn fork_choice_internal(self: &Arc, slot: Slot) -> Result<(), Error> { - // Atomically obtain the head block root and the finalized block. - let (beacon_block_root, finalized_block) = { - let mut fork_choice = self.fork_choice.write(); - - // Determine the root of the block that is the head of the chain. - let beacon_block_root = fork_choice.get_head(slot, &self.spec)?; - - (beacon_block_root, fork_choice.get_finalized_block()?) - }; - - let current_head = self.head_info()?; - let old_finalized_checkpoint = current_head.finalized_checkpoint; - - // Exit early if the head hasn't changed. - if beacon_block_root == current_head.block_root { - return Ok(()); - } - - // Check to ensure that this finalized block hasn't been marked as invalid. - if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status { - crit!( - self.log, - "Finalized block has an invalid payload"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ - You may be on a hostile network.", - "block_hash" => ?block_hash - ); - let mut shutdown_sender = self.shutdown_sender(); - shutdown_sender - .try_send(ShutdownReason::Failure( - "Finalized block has an invalid execution payload.", - )) - .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?; - - // Exit now, the node is in an invalid state. 
- return Err(Error::InvalidFinalizedPayload { - finalized_root: finalized_block.root, - execution_block_hash: block_hash, - }); - } - - let lag_timer = metrics::start_timer(&metrics::FORK_CHOICE_SET_HEAD_LAG_TIMES); - - // At this point we know that the new head block is not the same as the previous one - metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); - - let new_head = { - let beacon_block = self - .store - .get_full_block(&beacon_block_root)? - .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; - - let beacon_state_root = beacon_block.state_root(); - let mut beacon_state: BeaconState = self - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; - beacon_state.build_all_committee_caches(&self.spec)?; - - BeaconSnapshot { - beacon_block, - beacon_block_root, - beacon_state, - } - }; - - // Attempt to detect if the new head is not on the same chain as the previous block - // (i.e., a re-org). - // - // Note: this will declare a re-org if we skip `SLOTS_PER_HISTORICAL_ROOT` blocks - // between calls to fork choice without swapping between chains. This seems like an - // extreme-enough scenario that a warning is fine. 
- let is_reorg = new_head - .beacon_state - .get_block_root(current_head.slot) - .map_or(true, |root| *root != current_head.block_root); - - let mut reorg_distance = Slot::new(0); - - if is_reorg { - match self.find_reorg_slot(&new_head.beacon_state, new_head.beacon_block_root) { - Ok(slot) => reorg_distance = current_head.slot.saturating_sub(slot), - Err(e) => { - warn!( - self.log, - "Could not find re-org depth"; - "error" => format!("{:?}", e), - ); - } - } - - metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); - metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT_INTEROP); - warn!( - self.log, - "Beacon chain re-org"; - "previous_head" => ?current_head.block_root, - "previous_slot" => current_head.slot, - "new_head_parent" => ?new_head.beacon_block.parent_root(), - "new_head" => ?beacon_block_root, - "new_slot" => new_head.beacon_block.slot(), - "reorg_distance" => reorg_distance, - ); - } else { - debug!( - self.log, - "Head beacon block"; - "justified_root" => ?new_head.beacon_state.current_justified_checkpoint().root, - "justified_epoch" => new_head.beacon_state.current_justified_checkpoint().epoch, - "finalized_root" => ?new_head.beacon_state.finalized_checkpoint().root, - "finalized_epoch" => new_head.beacon_state.finalized_checkpoint().epoch, - "root" => ?beacon_block_root, - "slot" => new_head.beacon_block.slot(), - ); - }; - - let new_finalized_checkpoint = new_head.beacon_state.finalized_checkpoint(); - - // It is an error to try to update to a head with a lesser finalized epoch. 
- if new_finalized_checkpoint.epoch < old_finalized_checkpoint.epoch { - return Err(Error::RevertedFinalizedEpoch { - previous_epoch: old_finalized_checkpoint.epoch, - new_epoch: new_finalized_checkpoint.epoch, - }); - } - - let is_epoch_transition = current_head.slot.epoch(T::EthSpec::slots_per_epoch()) - < new_head - .beacon_state - .slot() - .epoch(T::EthSpec::slots_per_epoch()); - - let update_head_timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); - - // These fields are used for server-sent events. - let state_root = new_head.beacon_state_root(); - let head_slot = new_head.beacon_state.slot(); - let head_proposer_index = new_head.beacon_block.message().proposer_index(); - let proposer_graffiti = new_head - .beacon_block - .message() - .body() - .graffiti() - .as_utf8_lossy(); - - // Find the dependent roots associated with this head before updating the snapshot. This - // is to ensure consistency when sending server sent events later in this method. - let dependent_root = new_head - .beacon_state - .proposer_shuffling_decision_root(self.genesis_block_root); - let prev_dependent_root = new_head - .beacon_state - .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); - - drop(lag_timer); - - // Clear the early attester cache in case it conflicts with `self.canonical_head`. - self.early_attester_cache.clear(); - - // Update the snapshot that stores the head of the chain at the time it received the - // block. - *self - .canonical_head - .try_write_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)? = new_head; - - // The block has now been set as head so we can record times and delays. - metrics::stop_timer(update_head_timer); - - let block_time_set_as_head = timestamp_now(); - - // Calculate the total delay between the start of the slot and when it was set as head. 
- let block_delay_total = - get_slot_delay_ms(block_time_set_as_head, head_slot, &self.slot_clock); - - // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to - // the cache during sync. - if block_delay_total < self.slot_clock.slot_duration() * 64 { - self.block_times_cache.write().set_time_set_as_head( - beacon_block_root, - current_head.slot, - block_time_set_as_head, - ); - } - - // If a block comes in from over 4 slots ago, it is most likely a block from sync. - let block_from_sync = block_delay_total > self.slot_clock.slot_duration() * 4; - - // Determine whether the block has been set as head too late for proper attestation - // production. - let attestation_deadline = self.slot_clock.unagg_attestation_production_delay(); - let late_head = block_delay_total >= attestation_deadline; - - // Do not store metrics if the block was > 4 slots old, this helps prevent noise during - // sync. - if !block_from_sync { - // Observe the total block delay. This is the delay between the time the slot started - // and when the block was set as head. - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, - block_delay_total, - ); - - // Observe the delay between when we imported the block and when we set the block as - // head. 
- let block_delays = self.block_times_cache.read().get_block_delays( - beacon_block_root, - self.slot_clock - .start_of(head_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - let observed_delay = block_delays - .observed - .unwrap_or_else(|| Duration::from_secs(0)); - let import_delay = block_delays - .imported - .unwrap_or_else(|| Duration::from_secs(0)); - let attestable_delay = block_delays - .attestable - .unwrap_or_else(|| Duration::from_secs(0)); - let set_as_head_delay = block_delays - .set_as_head - .unwrap_or_else(|| Duration::from_secs(0)); - - metrics::observe_duration( - &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME, - observed_delay, - ); - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_ATTESTABLE_DELAY_TIME, - attestable_delay, - ); - metrics::observe_duration( - &metrics::BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME, - import_delay, - ); - metrics::observe_duration( - &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME, - set_as_head_delay, - ); - - // If the block was enshrined as head too late for attestations to be created for it, - // log a debug warning and increment a metric. 
- let missed_attestation_deadline = attestable_delay >= attestation_deadline; - if missed_attestation_deadline { - let due_to_late_block = observed_delay >= attestation_deadline; - let due_to_borderline_late_block = - observed_delay + BORDERLINE_LATE_BLOCK_TOLERANCE >= attestation_deadline; - let due_to_processing = observed_delay + import_delay >= attestation_deadline; - - let reason = if due_to_late_block { - metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_MISSED_ATT_DEADLINE_LATE); - "late block" - } else if due_to_borderline_late_block { - metrics::inc_counter( - &metrics::BEACON_BLOCK_HEAD_MISSED_ATT_DEADLINE_BORDERLINE, - ); - "borderline late block" - } else if due_to_processing { - metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_MISSED_ATT_DEADLINE_SLOW); - "slow to process" - } else { - metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_MISSED_ATT_DEADLINE_OTHER); - "other" - }; - - debug!( - self.log, - "Delayed head block"; - "reason" => reason, - "block_root" => ?beacon_block_root, - "proposer_index" => head_proposer_index, - "slot" => head_slot, - "block_delay" => ?block_delay_total, - "observed_delay" => ?block_delays.observed, - "imported_delay" => ?block_delays.imported, - "attestable_delay" => ?attestable_delay, - "set_as_head_delay" => ?block_delays.set_as_head, - ); - } - } - - if is_epoch_transition || is_reorg { - self.persist_head_and_fork_choice()?; - self.op_pool.prune_attestations(self.epoch()?); - } - - if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch { - // Due to race conditions, it's technically possible that the head we load here is - // different to the one earlier in this function. - // - // Since the head can't move backwards in terms of finalized epoch, we can only load a - // head with a *later* finalized state. There is no harm in this. 
- let head = self - .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout)?; - - // State root of the finalized state on the epoch boundary, NOT the state - // of the finalized block. We need to use an iterator in case the state is beyond - // the reach of the new head's `state_roots` array. - let new_finalized_slot = head - .beacon_state - .finalized_checkpoint() - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let new_finalized_state_root = process_results( - StateRootsIterator::new(&self.store, &head.beacon_state), - |mut iter| { - iter.find_map(|(state_root, slot)| { - if slot == new_finalized_slot { - Some(state_root) - } else { - None - } - }) - }, - )? - .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; - - self.after_finalization(&head.beacon_state, new_finalized_state_root)?; - } - - // Register a server-sent event if necessary - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_head_subscribers() { - match (dependent_root, prev_dependent_root) { - (Ok(current_duty_dependent_root), Ok(previous_duty_dependent_root)) => { - event_handler.register(EventKind::Head(SseHead { - slot: head_slot, - block: beacon_block_root, - state: state_root, - current_duty_dependent_root, - previous_duty_dependent_root, - epoch_transition: is_epoch_transition, - })); - } - (Err(e), _) | (_, Err(e)) => { - warn!( - self.log, - "Unable to find dependent roots, cannot register head event"; - "error" => ?e - ); - } - } - } - - if is_reorg && event_handler.has_reorg_subscribers() { - event_handler.register(EventKind::ChainReorg(SseChainReorg { - slot: head_slot, - depth: reorg_distance.as_u64(), - old_head_block: current_head.block_root, - old_head_state: current_head.state_root, - new_head_block: beacon_block_root, - new_head_state: state_root, - epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), - })); - } - - if !block_from_sync && late_head && event_handler.has_late_head_subscribers() 
{ - let peer_info = self - .block_times_cache - .read() - .get_peer_info(beacon_block_root); - let block_delays = self.block_times_cache.read().get_block_delays( - beacon_block_root, - self.slot_clock - .start_of(head_slot) - .unwrap_or_else(|| Duration::from_secs(0)), - ); - event_handler.register(EventKind::LateHead(SseLateHead { - slot: head_slot, - block: beacon_block_root, - peer_id: peer_info.id, - peer_client: peer_info.client, - proposer_index: head_proposer_index, - proposer_graffiti, - block_delay: block_delay_total, - observed_delay: block_delays.observed, - attestable_delay: block_delays.attestable, - imported_delay: block_delays.imported, - set_as_head_delay: block_delays.set_as_head, - })); - } - } - - // Update the execution layer. - // Always use the wall-clock slot to update the execution engine rather than the `slot` - // passed in. - if let Err(e) = self.update_execution_engine_forkchoice_blocking(self.slot()?) { - crit!( - self.log, - "Failed to update execution head"; - "error" => ?e - ); - } - - // Performing this call immediately after - // `update_execution_engine_forkchoice_blocking` might result in two calls to fork - // choice updated, one *without* payload attributes and then a second *with* - // payload attributes. - // - // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as - // far as I know. - if let Err(e) = self.prepare_beacon_proposer_blocking() { - crit!( - self.log, - "Failed to prepare proposers after fork choice"; - "error" => ?e - ); - } - - Ok(()) - } - - pub fn prepare_beacon_proposer_blocking(self: &Arc) -> Result<(), Error> { - let current_slot = self.slot()?; - - // Avoids raising an error before Bellatrix. - // - // See `Self::prepare_beacon_proposer_async` for more detail. 
- if self.slot_is_prior_to_bellatrix(current_slot + 1) { - return Ok(()); - } - - let execution_layer = self - .execution_layer - .as_ref() - .ok_or(Error::ExecutionLayerMissing)?; - - execution_layer - .block_on_generic(|_| self.prepare_beacon_proposer_async(current_slot)) - .map_err(Error::PrepareProposerBlockingFailed)? + pub fn block_is_known_to_fork_choice(&self, root: &Hash256) -> bool { + self.canonical_head + .fork_choice_read_lock() + .contains_block(root) } /// Determines the beacon proposer for the next slot. If that proposer is registered in the @@ -3820,7 +3724,7 @@ impl BeaconChain { /// 1. We're in the tail-end of the slot (as defined by PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR) /// 2. The head block is one slot (or less) behind the prepare slot (e.g., we're preparing for /// the next slot and the block at the current slot is already known). - pub async fn prepare_beacon_proposer_async( + pub async fn prepare_beacon_proposer( self: &Arc, current_slot: Slot, ) -> Result<(), Error> { @@ -3843,20 +3747,45 @@ impl BeaconChain { return Ok(()); } - let head = self.head_info()?; - let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); + // Atomically read some values from the canonical head, whilst avoiding holding the cached + // head `Arc` any longer than necessary. + // + // Use a blocking task since blocking the core executor on the canonical head read lock can + // block the core tokio executor. 
+ let chain = self.clone(); + let (head_slot, head_root, head_decision_root, head_random, forkchoice_update_params) = + self.spawn_blocking_handle( + move || { + let cached_head = chain.canonical_head.cached_head(); + let head_block_root = cached_head.head_block_root(); + let decision_root = cached_head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root)?; + Ok::<_, Error>(( + cached_head.head_slot(), + head_block_root, + decision_root, + cached_head.head_random()?, + cached_head.forkchoice_update_parameters(), + )) + }, + "prepare_beacon_proposer_fork_choice_read", + ) + .await??; + let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); // Don't bother with proposer prep if the head is more than // `PREPARE_PROPOSER_HISTORIC_EPOCHS` prior to the current slot. // // This prevents the routine from running during sync. - if head.slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS + if head_slot + T::EthSpec::slots_per_epoch() * PREPARE_PROPOSER_HISTORIC_EPOCHS < current_slot { debug!( self.log, "Head too old for proposer prep"; - "head_slot" => head.slot, + "head_slot" => head_slot, "current_slot" => current_slot, ); return Ok(()); @@ -3865,9 +3794,9 @@ impl BeaconChain { // Ensure that the shuffling decision root is correct relative to the epoch we wish to // query. let shuffling_decision_root = if head_epoch == prepare_epoch { - head.proposer_shuffling_decision_root + head_decision_root } else { - head.block_root + head_root }; // Read the proposer from the proposer cache. @@ -3897,7 +3826,7 @@ impl BeaconChain { return Ok(()); } - let (proposers, decision_root, fork) = + let (proposers, decision_root, _, fork) = compute_proposer_duties_from_head(prepare_epoch, self)?; let proposer_index = prepare_slot.as_usize() % (T::EthSpec::slots_per_epoch() as usize); @@ -3943,7 +3872,7 @@ impl BeaconChain { .start_of(prepare_slot) .ok_or(Error::InvalidSlot(prepare_slot))? 
.as_secs(), - prev_randao: head.random, + prev_randao: head_random, suggested_fee_recipient: execution_layer .get_suggested_fee_recipient(proposer as u64) .await, @@ -3953,18 +3882,13 @@ impl BeaconChain { self.log, "Preparing beacon proposer"; "payload_attributes" => ?payload_attributes, - "head_root" => ?head.block_root, + "head_root" => ?head_root, "prepare_slot" => prepare_slot, "validator" => proposer, ); let already_known = execution_layer - .insert_proposer( - prepare_slot, - head.block_root, - proposer as u64, - payload_attributes, - ) + .insert_proposer(prepare_slot, head_root, proposer as u64, payload_attributes) .await; // Only push a log to the user if this is the first time we've seen this proposer for this // slot. @@ -4006,7 +3930,7 @@ impl BeaconChain { // known). if till_prepare_slot <= self.slot_clock.slot_duration() / PAYLOAD_PREPARATION_LOOKAHEAD_FACTOR - || head.slot + 1 >= prepare_slot + || head_slot + 1 >= prepare_slot { debug!( self.log, @@ -4015,37 +3939,17 @@ impl BeaconChain { "prepare_slot" => prepare_slot ); - self.update_execution_engine_forkchoice_async(current_slot) + self.update_execution_engine_forkchoice(current_slot, forkchoice_update_params) .await?; } Ok(()) } - pub fn update_execution_engine_forkchoice_blocking( - self: &Arc, - current_slot: Slot, - ) -> Result<(), Error> { - // Avoids raising an error before Bellatrix. - // - // See `Self::update_execution_engine_forkchoice_async` for more detail. - if self.slot_is_prior_to_bellatrix(current_slot + 1) { - return Ok(()); - } - - let execution_layer = self - .execution_layer - .as_ref() - .ok_or(Error::ExecutionLayerMissing)?; - - execution_layer - .block_on_generic(|_| self.update_execution_engine_forkchoice_async(current_slot)) - .map_err(Error::ForkchoiceUpdate)? 
- } - - pub async fn update_execution_engine_forkchoice_async( + pub async fn update_execution_engine_forkchoice( self: &Arc, current_slot: Slot, + params: ForkchoiceUpdateParameters, ) -> Result<(), Error> { let next_slot = current_slot + 1; @@ -4083,77 +3987,72 @@ impl BeaconChain { // `execution_engine_forkchoice_lock` apart from the one here. let forkchoice_lock = execution_layer.execution_engine_forkchoice_lock().await; - // Deadlock warning: - // - // We are taking the `self.fork_choice` lock whilst holding the `forkchoice_lock`. This - // is intentional, since it allows us to ensure a consistent ordering of messages to the - // execution layer. - let forkchoice_update_parameters = - self.fork_choice.read().get_forkchoice_update_parameters(); - let (head_block_root, head_hash, finalized_hash) = if let Some(params) = - forkchoice_update_parameters + let (head_block_root, head_hash, justified_hash, finalized_hash) = if let Some(head_hash) = + params.head_hash { - if let Some(head_hash) = params.head_hash { - ( - params.head_root, - head_hash, - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // The head block does not have an execution block hash. We must check to see if we - // happen to be the proposer of the transition block, in which case we still need to - // send forkchoice_updated. - match self.spec.fork_name_at_slot::(next_slot) { - // We are pre-bellatrix; no need to update the EL. - ForkName::Base | ForkName::Altair => return Ok(()), - _ => { - // We are post-bellatrix - if execution_layer - .payload_attributes(next_slot, params.head_root) + ( + params.head_root, + head_hash, + params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) + } else { + // The head block does not have an execution block hash. 
We must check to see if we + // happen to be the proposer of the transition block, in which case we still need to + // send forkchoice_updated. + match self.spec.fork_name_at_slot::(next_slot) { + // We are pre-bellatrix; no need to update the EL. + ForkName::Base | ForkName::Altair => return Ok(()), + _ => { + // We are post-bellatrix + if let Some(payload_attributes) = execution_layer + .payload_attributes(next_slot, params.head_root) + .await + { + // We are a proposer, check for terminal_pow_block_hash + if let Some(terminal_pow_block_hash) = execution_layer + .get_terminal_pow_block_hash(&self.spec, payload_attributes.timestamp) .await - .is_some() + .map_err(Error::ForkchoiceUpdate)? { - // We are a proposer, check for terminal_pow_block_hash - if let Some(terminal_pow_block_hash) = execution_layer - .get_terminal_pow_block_hash(&self.spec) - .await - .map_err(Error::ForkchoiceUpdate)? - { - info!( - self.log, - "Prepared POS transition block proposer"; "slot" => next_slot - ); - ( - params.head_root, - terminal_pow_block_hash, - params - .finalized_hash - .unwrap_or_else(ExecutionBlockHash::zero), - ) - } else { - // TTD hasn't been reached yet, no need to update the EL. - return Ok(()); - } + info!( + self.log, + "Prepared POS transition block proposer"; "slot" => next_slot + ); + ( + params.head_root, + terminal_pow_block_hash, + params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), + params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + ) } else { - // We are not a proposer, no need to update the EL. + // TTD hasn't been reached yet, no need to update the EL. return Ok(()); } + } else { + // We are not a proposer, no need to update the EL. 
+ return Ok(()); } } } - } else { - warn!( - self.log, - "Missing forkchoice params"; - "msg" => "please report this non-critical bug" - ); - return Ok(()); }; let forkchoice_updated_response = execution_layer - .notify_forkchoice_updated(head_hash, finalized_hash, current_slot, head_block_root) + .notify_forkchoice_updated( + head_hash, + justified_hash, + finalized_hash, + current_slot, + head_block_root, + ) .await .map_err(Error::ExecutionForkChoiceUpdateFailed); @@ -4165,11 +4064,19 @@ impl BeaconChain { Ok(status) => match status { PayloadStatus::Valid => { // Ensure that fork choice knows that the block is no longer optimistic. - if let Err(e) = self - .fork_choice - .write() - .on_valid_execution_payload(head_block_root) - { + let chain = self.clone(); + let fork_choice_update_result = self + .spawn_blocking_handle( + move || { + chain + .canonical_head + .fork_choice_write_lock() + .on_valid_execution_payload(head_block_root) + }, + "update_execution_engine_valid_payload", + ) + .await?; + if let Err(e) = fork_choice_update_result { error!( self.log, "Failed to validate payload"; @@ -4196,38 +4103,59 @@ impl BeaconChain { Ok(()) } PayloadStatus::Invalid { - latest_valid_hash, .. + latest_valid_hash, + ref validation_error, } => { + debug!( + self.log, + "Invalid execution payload"; + "validation_error" => ?validation_error, + "latest_valid_hash" => ?latest_valid_hash, + "head_hash" => ?head_hash, + "head_block_root" => ?head_block_root, + "method" => "fcU", + ); warn!( self.log, "Fork choice update invalidated payload"; "status" => ?status ); - // The execution engine has stated that all blocks between the - // `head_execution_block_hash` and `latest_valid_hash` are invalid. 
- let chain = self.clone(); - execution_layer - .executor() - .spawn_blocking_handle( - move || { - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { - head_block_root, - always_invalidate_head: true, - latest_valid_ancestor: latest_valid_hash, - }, - ) + + // This implies that the terminal block was invalid. We are being explicit in + // invalidating only the head block in this case. + if latest_valid_hash == ExecutionBlockHash::zero() { + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: head_block_root, }, - "process_invalid_execution_payload_many", ) - .ok_or(BeaconChainError::RuntimeShutdown)? - .await - .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; + .await?; + } else { + // The execution engine has stated that all blocks between the + // `head_execution_block_hash` and `latest_valid_hash` are invalid. + self.process_invalid_execution_payload( + &InvalidationOperation::InvalidateMany { + head_block_root, + always_invalidate_head: true, + latest_valid_ancestor: latest_valid_hash, + }, + ) + .await?; + } Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } - PayloadStatus::InvalidTerminalBlock { .. } - | PayloadStatus::InvalidBlockHash { .. } => { + PayloadStatus::InvalidBlockHash { + ref validation_error, + } => { + debug!( + self.log, + "Invalid execution payload block hash"; + "validation_error" => ?validation_error, + "head_hash" => ?head_hash, + "head_block_root" => ?head_block_root, + "method" => "fcU", + ); warn!( self.log, "Fork choice update invalidated payload"; @@ -4238,22 +4166,10 @@ impl BeaconChain { // // Using a `None` latest valid ancestor will result in only the head block // being invalidated (no ancestors). 
- let chain = self.clone(); - execution_layer - .executor() - .spawn_blocking_handle( - move || { - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: head_block_root, - }, - ) - }, - "process_invalid_execution_payload_single", - ) - .ok_or(BeaconChainError::RuntimeShutdown)? - .await - .map_err(BeaconChainError::ProcessInvalidExecutionPayload)??; + self.process_invalid_execution_payload(&InvalidationOperation::InvalidateOne { + block_root: head_block_root, + }) + .await?; Err(BeaconChainError::ExecutionForkChoiceUpdateInvalid { status }) } @@ -4263,30 +4179,86 @@ impl BeaconChain { } /// Returns `true` if the given slot is prior to the `bellatrix_fork_epoch`. - fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { + pub fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { self.spec.bellatrix_fork_epoch.map_or(true, |bellatrix| { slot.epoch(T::EthSpec::slots_per_epoch()) < bellatrix }) } - /// Returns the status of the current head block, regarding the validity of the execution - /// payload. - pub fn head_safety_status(&self) -> Result { - let head = self.head_info()?; - let head_block = self - .fork_choice - .read() - .get_block(&head.block_root) - .ok_or(BeaconChainError::HeadMissingFromForkChoice(head.block_root))?; + /// Returns the value of `execution_optimistic` for `block`. + /// + /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or has + /// `ExecutionStatus::Invalid`. + pub fn is_optimistic_or_invalid_block>( + &self, + block: &SignedBeaconBlock, + ) -> Result { + // Check if the block is pre-Bellatrix. 
+ if self.slot_is_prior_to_bellatrix(block.slot()) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&block.canonical_root()) + .map_err(BeaconChainError::ForkChoiceError) + } + } - let status = match head_block.execution_status { - ExecutionStatus::Valid(block_hash) => HeadSafetyStatus::Safe(Some(block_hash)), - ExecutionStatus::Invalid(block_hash) => HeadSafetyStatus::Invalid(block_hash), - ExecutionStatus::Optimistic(block_hash) => HeadSafetyStatus::Unsafe(block_hash), - ExecutionStatus::Irrelevant(_) => HeadSafetyStatus::Safe(None), - }; + /// Returns the value of `execution_optimistic` for `head_block`. + /// + /// Returns `Ok(false)` if the block is pre-Bellatrix, or has `ExecutionStatus::Valid`. + /// Returns `Ok(true)` if the block has `ExecutionStatus::Optimistic` or `ExecutionStatus::Invalid`. + /// + /// This function will return an error if `head_block` is not present in the fork choice store + /// and so should only be used on the head block or when the block *should* be present in the + /// fork choice store. + /// + /// There is a potential race condition when syncing where the block_root of `head_block` could + /// be pruned from the fork choice store before being read. + pub fn is_optimistic_or_invalid_head_block>( + &self, + head_block: &SignedBeaconBlock, + ) -> Result { + // Check if the block is pre-Bellatrix. + if self.slot_is_prior_to_bellatrix(head_block.slot()) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block_no_fallback(&head_block.canonical_root()) + .map_err(BeaconChainError::ForkChoiceError) + } + } - Ok(status) + /// Returns the value of `execution_optimistic` for the current head block. + /// You can optionally provide `head_info` if it was computed previously. + /// + /// Returns `Ok(false)` if the head block is pre-Bellatrix, or has `ExecutionStatus::Valid`. 
+ /// Returns `Ok(true)` if the head block has `ExecutionStatus::Optimistic` or `ExecutionStatus::Invalid`. + /// + /// There is a potential race condition when syncing where the block root of `head_info` could + /// be pruned from the fork choice store before being read. + pub fn is_optimistic_or_invalid_head(&self) -> Result { + self.canonical_head + .head_execution_status() + .map(|status| status.is_optimistic_or_invalid()) + } + + pub fn is_optimistic_or_invalid_block_root( + &self, + block_slot: Slot, + block_root: &Hash256, + ) -> Result { + // Check if the block is pre-Bellatrix. + if self.slot_is_prior_to_bellatrix(block_slot) { + Ok(false) + } else { + self.canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block_no_fallback(block_root) + .map_err(BeaconChainError::ForkChoiceError) + } } /// This function takes a configured weak subjectivity `Checkpoint` and the latest finalized `Checkpoint`. @@ -4348,78 +4320,51 @@ impl BeaconChain { /// Note: this function **MUST** be called from a non-async context since /// it contains a call to `fork_choice` which may eventually call /// `tokio::runtime::block_on` in certain cases. - pub fn per_slot_task(self: &Arc) { - trace!(self.log, "Running beacon chain per slot tasks"); + pub async fn per_slot_task(self: &Arc) { if let Some(slot) = self.slot_clock.now() { - // Run fork choice and signal to any waiting task that it has completed. - if let Err(e) = self.fork_choice() { - error!( - self.log, - "Fork choice error at slot start"; - "error" => ?e, - "slot" => slot, - ); + debug!( + self.log, + "Running beacon chain per slot tasks"; + "slot" => ?slot + ); + + // Always run the light-weight pruning tasks (these structures should be empty during + // sync anyway). + self.naive_aggregation_pool.write().prune(slot); + self.block_times_cache.write().prune(slot); + + // Don't run heavy-weight tasks during sync. 
+ if self.best_slot() + MAX_PER_SLOT_FORK_CHOICE_DISTANCE < slot { + return; } + // Run fork choice and signal to any waiting task that it has completed. + self.recompute_head_at_current_slot().await; + // Send the notification regardless of fork choice success, this is a "best effort" // notification and we don't want block production to hit the timeout in case of error. - if let Some(tx) = &self.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(slot) { - warn!( - self.log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => slot, - ); - } - } - - self.naive_aggregation_pool.write().prune(slot); - self.block_times_cache.write().prune(slot); + // Use a blocking task to avoid blocking the core executor whilst waiting for locks + // in `ForkChoiceSignalTx`. + let chain = self.clone(); + self.task_executor.clone().spawn_blocking( + move || { + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(slot) { + warn!( + chain.log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => slot, + ); + } + } + }, + "per_slot_task_fc_signal_tx", + ); } } - /// Called after `self` has had a new block finalized. - /// - /// Performs pruning and finality-based optimizations. 
- fn after_finalization( - &self, - head_state: &BeaconState, - new_finalized_state_root: Hash256, - ) -> Result<(), Error> { - self.fork_choice.write().prune()?; - let new_finalized_checkpoint = head_state.finalized_checkpoint(); - - self.observed_block_producers.write().prune( - new_finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - ); - - self.op_pool.prune_all(head_state, self.epoch()?); - - self.store_migrator.process_finalization( - new_finalized_state_root.into(), - new_finalized_checkpoint, - self.head_tracker.clone(), - )?; - - self.attester_cache - .prune_below(new_finalized_checkpoint.epoch); - - if let Some(event_handler) = self.event_handler.as_ref() { - if event_handler.has_finalized_subscribers() { - event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { - epoch: new_finalized_checkpoint.epoch, - block: new_finalized_checkpoint.root, - state: new_finalized_state_root, - })); - } - } - - Ok(()) - } - /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head /// `head_block_root`. 
The `map_fn` will be supplied two values: /// @@ -4458,8 +4403,8 @@ impl BeaconChain { F: Fn(&CommitteeCache, Hash256) -> Result, { let head_block = self - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&head_block_root) .ok_or(Error::MissingBeaconBlock(head_block_root))?; @@ -4598,10 +4543,13 @@ impl BeaconChain { ) -> Result>>, Error> { let mut dump = vec![]; - let mut last_slot = BeaconSnapshot { - beacon_block: self.head()?.beacon_block.into(), - beacon_block_root: self.head()?.beacon_block_root, - beacon_state: self.head()?.beacon_state, + let mut last_slot = { + let head = self.canonical_head.cached_head(); + BeaconSnapshot { + beacon_block: Arc::new(head.snapshot.beacon_block.clone_as_blinded()), + beacon_block_root: head.snapshot.beacon_block_root, + beacon_state: head.snapshot.beacon_state.clone(), + } }; dump.push(last_slot.clone()); @@ -4628,7 +4576,7 @@ impl BeaconChain { })?; let slot = BeaconSnapshot { - beacon_block, + beacon_block: Arc::new(beacon_block), beacon_block_root, beacon_state, }; @@ -4665,13 +4613,87 @@ impl BeaconChain { .map(|duration| (fork_name, duration)) } - pub fn dump_as_dot(&self, output: &mut W) { - let canonical_head_hash = self + /// This method serves to get a sense of the current chain health. It is used in block proposal + /// to determine whether we should outsource payload production duties. + /// + /// Since we are likely calling this during the slot we are going to propose in, don't take into + /// account the current slot when accounting for skips. + pub fn is_healthy(&self, parent_root: &Hash256) -> Result { + // Check if the merge has been finalized. 
+ if let Some(finalized_hash) = self .canonical_head - .try_read_for(HEAD_LOCK_TIMEOUT) - .ok_or(Error::CanonicalHeadLockTimeout) - .unwrap() - .beacon_block_root; + .cached_head() + .forkchoice_update_parameters() + .finalized_hash + { + if ExecutionBlockHash::zero() == finalized_hash { + return Ok(ChainHealth::PreMerge); + } + } else { + return Ok(ChainHealth::PreMerge); + }; + + // Check that the parent is NOT optimistic. + if let Some(execution_status) = self + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(parent_root) + { + if execution_status.is_strictly_optimistic() { + return Ok(ChainHealth::Optimistic); + } + } + + if self.config.builder_fallback_disable_checks { + return Ok(ChainHealth::Healthy); + } + + let current_slot = self.slot()?; + + // Check slots at the head of the chain. + let prev_slot = current_slot.saturating_sub(Slot::new(1)); + let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot()); + let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips; + + // Check if finalization is advancing. + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + let epochs_since_finalization = current_epoch.saturating_sub( + self.canonical_head + .cached_head() + .finalized_checkpoint() + .epoch, + ); + let finalization_check = epochs_since_finalization.as_usize() + <= self.config.builder_fallback_epochs_since_finalization; + + // Check skip slots in the last `SLOTS_PER_EPOCH`. + let start_slot = current_slot.saturating_sub(T::EthSpec::slots_per_epoch()); + let mut epoch_skips = 0; + for slot in start_slot.as_u64()..current_slot.as_u64() { + if self + .block_root_at_slot_skips_none(Slot::new(slot))? 
+ .is_none() + { + epoch_skips += 1; + } + } + let epoch_skips_check = epoch_skips <= self.config.builder_fallback_skips_per_epoch; + + if !head_skips_check { + Ok(ChainHealth::Unhealthy(FailedCondition::Skips)) + } else if !finalization_check { + Ok(ChainHealth::Unhealthy( + FailedCondition::EpochsSinceFinalization, + )) + } else if !epoch_skips_check { + Ok(ChainHealth::Unhealthy(FailedCondition::SkipsPerEpoch)) + } else { + Ok(ChainHealth::Healthy) + } + } + + pub fn dump_as_dot(&self, output: &mut W) { + let canonical_head_hash = self.canonical_head.cached_head().head_block_root(); let mut visited: HashSet = HashSet::new(); let mut finalized_blocks: HashSet = HashSet::new(); let mut justified_blocks: HashSet = HashSet::new(); diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 450e7c11c9..dc8ac21c21 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -8,13 +8,14 @@ use crate::{metrics, BeaconSnapshot}; use derivative::Derivative; use fork_choice::ForkChoiceStore; use ssz_derive::{Decode, Encode}; +use std::collections::BTreeSet; use std::marker::PhantomData; use std::sync::Arc; use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ - BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, Hash256, - Slot, + BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, ExecPayload, + Hash256, Slot, }; #[derive(Debug)] @@ -155,7 +156,10 @@ pub struct BeaconForkChoiceStore, Cold: ItemStore< justified_checkpoint: Checkpoint, justified_balances: Vec, best_justified_checkpoint: Checkpoint, + unrealized_justified_checkpoint: Checkpoint, + unrealized_finalized_checkpoint: Checkpoint, proposer_boost_root: Hash256, + equivocating_indices: BTreeSet, _phantom: PhantomData, } @@ -202,7 +206,10 @@ where justified_balances: 
anchor_state.balances().to_vec(), finalized_checkpoint, best_justified_checkpoint: justified_checkpoint, + unrealized_justified_checkpoint: justified_checkpoint, + unrealized_finalized_checkpoint: finalized_checkpoint, proposer_boost_root: Hash256::zero(), + equivocating_indices: BTreeSet::new(), _phantom: PhantomData, } } @@ -217,7 +224,10 @@ where justified_checkpoint: self.justified_checkpoint, justified_balances: self.justified_balances.clone(), best_justified_checkpoint: self.best_justified_checkpoint, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, proposer_boost_root: self.proposer_boost_root, + equivocating_indices: self.equivocating_indices.clone(), } } @@ -234,7 +244,10 @@ where justified_checkpoint: persisted.justified_checkpoint, justified_balances: persisted.justified_balances, best_justified_checkpoint: persisted.best_justified_checkpoint, + unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint, proposer_boost_root: persisted.proposer_boost_root, + equivocating_indices: persisted.equivocating_indices, _phantom: PhantomData, }) } @@ -258,7 +271,7 @@ where fn on_verified_block>( &mut self, - _block: &BeaconBlock, + _block: BeaconBlockRef, block_root: Hash256, state: &BeaconState, ) -> Result<(), Self::Error> { @@ -281,6 +294,14 @@ where &self.finalized_checkpoint } + fn unrealized_justified_checkpoint(&self) -> &Checkpoint { + &self.unrealized_justified_checkpoint + } + + fn unrealized_finalized_checkpoint(&self) -> &Checkpoint { + &self.unrealized_finalized_checkpoint + } + fn proposer_boost_root(&self) -> Hash256 { self.proposer_boost_root } @@ -324,29 +345,51 @@ where self.best_justified_checkpoint = checkpoint } + fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint) { + self.unrealized_justified_checkpoint = checkpoint; + } + + fn 
set_unrealized_finalized_checkpoint(&mut self, checkpoint: Checkpoint) { + self.unrealized_finalized_checkpoint = checkpoint; + } + fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256) { self.proposer_boost_root = proposer_boost_root; } + + fn equivocating_indices(&self) -> &BTreeSet { + &self.equivocating_indices + } + + fn extend_equivocating_indices(&mut self, indices: impl IntoIterator) { + self.equivocating_indices.extend(indices); + } } /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. #[superstruct( - variants(V1, V7, V8), + variants(V1, V7, V8, V10, V11), variant_attributes(derive(Encode, Decode)), no_enum )] pub struct PersistedForkChoiceStore { #[superstruct(only(V1, V7))] pub balances_cache: BalancesCacheV1, - #[superstruct(only(V8))] + #[superstruct(only(V8, V10, V11))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec, pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V7, V8))] + #[superstruct(only(V10, V11))] + pub unrealized_justified_checkpoint: Checkpoint, + #[superstruct(only(V10, V11))] + pub unrealized_finalized_checkpoint: Checkpoint, + #[superstruct(only(V7, V8, V10, V11))] pub proposer_boost_root: Hash256, + #[superstruct(only(V11))] + pub equivocating_indices: BTreeSet, } -pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV8; +pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV11; diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index d645201a58..6a7de1da9e 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -9,6 +9,7 @@ //! values it stores are very small, so this should not be an issue. 
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use fork_choice::ExecutionStatus; use lru::LruCache; use smallvec::SmallVec; use state_processing::state_advance::partial_state_advance; @@ -135,11 +136,23 @@ impl BeaconProposerCache { pub fn compute_proposer_duties_from_head( current_epoch: Epoch, chain: &BeaconChain, -) -> Result<(Vec, Hash256, Fork), BeaconChainError> { - // Take a copy of the head of the chain. - let head = chain.head()?; - let mut state = head.beacon_state; - let head_state_root = head.beacon_block.state_root(); +) -> Result<(Vec, Hash256, ExecutionStatus, Fork), BeaconChainError> { + // Atomically collect information about the head whilst holding the canonical head `Arc` as + // short as possible. + let (mut state, head_state_root, head_block_root) = { + let head = chain.canonical_head.cached_head(); + // Take a copy of the head state. + let head_state = head.snapshot.beacon_state.clone(); + let head_state_root = head.head_state_root(); + let head_block_root = head.head_block_root(); + (head_state, head_state_root, head_block_root) + }; + + let execution_status = chain + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(BeaconChainError::HeadMissingFromForkChoice(head_block_root))?; // Advance the state into the requested epoch. ensure_state_is_in_epoch(&mut state, head_state_root, current_epoch, &chain.spec)?; @@ -153,7 +166,7 @@ pub fn compute_proposer_duties_from_head( .proposer_shuffling_decision_root(chain.genesis_block_root) .map_err(BeaconChainError::from)?; - Ok((indices, dependent_root, state.fork())) + Ok((indices, dependent_root, execution_status, state.fork())) } /// If required, advance `state` to `target_epoch`. 
diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 3be198e5e9..73fb47f4fa 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -1,4 +1,5 @@ use serde_derive::Serialize; +use std::sync::Arc; use types::{ BeaconState, EthSpec, ExecPayload, FullPayload, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, @@ -8,7 +9,7 @@ use types::{ /// head, justified head and finalized head. #[derive(Clone, Serialize, PartialEq, Debug)] pub struct BeaconSnapshot = FullPayload> { - pub beacon_block: SignedBeaconBlock, + pub beacon_block: Arc>, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, } @@ -29,7 +30,7 @@ pub struct PreProcessingSnapshot { impl> BeaconSnapshot { /// Create a new checkpoint. pub fn new( - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, beacon_state: BeaconState, ) -> Self { @@ -52,7 +53,7 @@ impl> BeaconSnapshot { /// Update all fields of the checkpoint. 
pub fn update( &mut self, - beacon_block: SignedBeaconBlock, + beacon_block: Arc>, beacon_block_root: Hash256, beacon_state: BeaconState, ) { diff --git a/beacon_node/beacon_chain/src/block_reward.rs b/beacon_node/beacon_chain/src/block_reward.rs index 9a035f42a7..3bddd2a521 100644 --- a/beacon_node/beacon_chain/src/block_reward.rs +++ b/beacon_node/beacon_chain/src/block_reward.rs @@ -1,8 +1,11 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::lighthouse::{AttestationRewards, BlockReward, BlockRewardMeta}; -use operation_pool::{AttMaxCover, MaxCover, RewardCache}; -use state_processing::per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards; -use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256, RelativeEpoch}; +use operation_pool::{AttMaxCover, MaxCover, RewardCache, SplitAttestation}; +use state_processing::{ + common::get_attesting_indices_from_state, + per_block_processing::altair::sync_committee::compute_sync_aggregate_rewards, +}; +use types::{BeaconBlockRef, BeaconState, EthSpec, ExecPayload, Hash256}; impl BeaconChain { pub fn compute_block_reward>( @@ -10,22 +13,38 @@ impl BeaconChain { block: BeaconBlockRef<'_, T::EthSpec, Payload>, block_root: Hash256, state: &BeaconState, + reward_cache: &mut RewardCache, + include_attestations: bool, ) -> Result { if block.slot() != state.slot() { return Err(BeaconChainError::BlockRewardSlotError); } - let active_indices = state.get_cached_active_validator_indices(RelativeEpoch::Current)?; - let mut reward_cache = RewardCache::default(); reward_cache.update(state)?; - let total_active_balance = state.get_total_balance(active_indices, &self.spec)?; - let mut per_attestation_rewards = block + + let total_active_balance = state.get_total_active_balance()?; + + let split_attestations = block .body() .attestations() .iter() .map(|att| { - AttMaxCover::new(att, state, &reward_cache, total_active_balance, &self.spec) - 
.ok_or(BeaconChainError::BlockRewardAttestationError) + let attesting_indices = get_attesting_indices_from_state(state, att)?; + Ok(SplitAttestation::new(att.clone(), attesting_indices)) + }) + .collect::, BeaconChainError>>()?; + + let mut per_attestation_rewards = split_attestations + .iter() + .map(|att| { + AttMaxCover::new( + att.as_ref(), + state, + reward_cache, + total_active_balance, + &self.spec, + ) + .ok_or(BeaconChainError::BlockRewardAttestationError) }) .collect::, _>>()?; @@ -36,7 +55,7 @@ impl BeaconChain { let latest_att = &updated[i]; for att in to_update { - att.update_covering_set(latest_att.object(), latest_att.covering_set()); + att.update_covering_set(latest_att.intermediate(), latest_att.covering_set()); } } @@ -62,11 +81,24 @@ impl BeaconChain { .map(|cover| cover.fresh_validators_rewards) .collect(); + // Add the attestation data if desired. + let attestations = if include_attestations { + block + .body() + .attestations() + .iter() + .map(|a| a.data.clone()) + .collect() + } else { + vec![] + }; + let attestation_rewards = AttestationRewards { total: attestation_total, prev_epoch_total, curr_epoch_total, per_attestation_rewards, + attestations, }; // Sync committee rewards. diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 48ae91c593..f4eef11441 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -31,10 +31,12 @@ //! |--------------- //! | //! ▼ -//! SignatureVerifiedBlock +//! SignatureVerifiedBlock //! | //! ▼ -//! FullyVerifiedBlock +//! ExecutionPendingBlock +//! | +//! await //! | //! ▼ //! END @@ -42,22 +44,25 @@ //! 
``` use crate::beacon_snapshot::PreProcessingSnapshot; use crate::execution_payload::{ - notify_new_payload, validate_execution_payload_for_gossip, validate_merge_block, + is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, + AllowOptimisticImport, PayloadNotifier, }; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ - beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT}, + beacon_chain::{ + BeaconForkChoice, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; use eth2::types::EventKind; use execution_layer::PayloadStatus; -use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus}; +use fork_choice::PayloadVerificationStatus; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use safe_arith::ArithError; -use slog::{debug, error, info, Logger}; +use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_processing::per_block_processing::is_merge_transition_block; @@ -72,16 +77,16 @@ use std::borrow::Cow; use std::fs; use std::io::Write; use std::sync::Arc; -use store::{Error as DBError, HotColdDB, KeyValueStore, StoreOp}; +use store::{Error as DBError, KeyValueStore, StoreOp}; +use task_executor::JoinHandle; use tree_hash::TreeHash; -use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -const POS_PANDA_BANNER: &str = r#" +pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, ;" ^; ;' ", ;" ^; ;' ", ; s$$$$$$$s ; ; s$$$$$$$s ; @@ -126,7 +131,7 @@ pub enum BlockError { /// /// It's unclear if this block 
is valid, but it cannot be processed without already knowing /// its parent. - ParentUnknown(Box>), + ParentUnknown(Arc>), /// The block skips too many slots and is a DoS risk. TooManySkippedSlots { parent_slot: Slot, block_slot: Slot }, /// The block slot is greater than the present slot. @@ -329,17 +334,46 @@ pub enum ExecutionPayloadError { terminal_block_hash: ExecutionBlockHash, payload_parent_hash: ExecutionBlockHash, }, - /// The execution node failed to provide a parent block to a known block. This indicates an - /// issue with the execution node. + /// The execution node is syncing but we fail the conditions for optimistic sync /// /// ## Peer scoring /// /// The peer is not necessarily invalid. - PoWParentMissing(ExecutionBlockHash), - /// The execution node is syncing but we fail the conditions for optimistic sync UnverifiedNonOptimisticCandidate, } +impl ExecutionPayloadError { + pub fn penalize_peer(&self) -> bool { + // This match statement should never have a default case so that we are + // always forced to consider here whether or not to penalize a peer when + // we add a new error condition. + match self { + // The peer has nothing to do with this error, do not penalize them. + ExecutionPayloadError::NoExecutionConnection => false, + // The peer has nothing to do with this error, do not penalize them. + ExecutionPayloadError::RequestFailed(_) => false, + // An honest optimistic node may propagate blocks which are rejected by an EE, do not + // penalize them. + ExecutionPayloadError::RejectedByExecutionEngine { .. } => false, + // This is a trivial gossip validation condition, there is no reason for an honest peer + // to propagate a block with an invalid payload time stamp. + ExecutionPayloadError::InvalidPayloadTimestamp { .. } => true, + // An honest optimistic node may propagate blocks with an invalid terminal PoW block, we + // should not penalized them. + ExecutionPayloadError::InvalidTerminalPoWBlock { .. 
} => false, + // This condition is checked *after* gossip propagation, therefore penalizing gossip + // peers for this block would be unfair. There may be an argument to penalize RPC + // blocks, since even an optimistic node shouldn't verify this block. We will remove the + // penalties for all block imports to keep things simple. + ExecutionPayloadError::InvalidActivationEpoch { .. } => false, + // As per `Self::InvalidActivationEpoch`. + ExecutionPayloadError::InvalidTerminalBlockHash { .. } => false, + // Do not penalize the peer since it's not their fault that *we're* optimistic. + ExecutionPayloadError::UnverifiedNonOptimisticCandidate => false, + } + } +} + impl From for ExecutionPayloadError { fn from(e: execution_layer::Error) -> Self { ExecutionPayloadError::RequestFailed(e) @@ -416,6 +450,12 @@ impl From for BlockError { } } +/// Stores information about verifying a payload against an execution engine. +pub struct PayloadVerificationOutcome { + pub payload_verification_status: PayloadVerificationStatus, + pub is_valid_merge_transition_block: bool, +} + /// Information about invalid blocks which might still be slashable despite being invalid. #[allow(clippy::enum_variant_names)] pub enum BlockSlashInfo { @@ -471,16 +511,16 @@ fn process_block_slash_info( /// Verify all signatures (except deposit signatures) on all blocks in the `chain_segment`. If all /// signatures are valid, the `chain_segment` is mapped to a `Vec` that can -/// later be transformed into a `FullyVerifiedBlock` without re-checking the signatures. If any +/// later be transformed into a `ExecutionPendingBlock` without re-checking the signatures. If any /// signature in the block is invalid, an `Err` is returned (it is not possible to known _which_ /// signature was invalid). /// /// ## Errors /// -/// The given `chain_segment` must span no more than two epochs, otherwise an error will be -/// returned. 
+/// The given `chain_segment` must contain only blocks from the same epoch, otherwise an error +/// will be returned. pub fn signature_verify_chain_segment( - mut chain_segment: Vec<(Hash256, SignedBeaconBlock)>, + mut chain_segment: Vec<(Hash256, Arc>)>, chain: &BeaconChain, ) -> Result>, BlockError> { if chain_segment.is_empty() { @@ -546,7 +586,7 @@ pub fn signature_verify_chain_segment( #[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct GossipVerifiedBlock { - pub block: SignedBeaconBlock, + pub block: Arc>, pub block_root: Hash256, parent: Option>, consensus_context: ConsensusContext, @@ -555,12 +595,16 @@ pub struct GossipVerifiedBlock { /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit /// signatures) have been verified. pub struct SignatureVerifiedBlock { - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, parent: Option>, consensus_context: ConsensusContext, } +/// Used to await the result of executing payload with a remote EE. +type PayloadVerificationHandle = + JoinHandle>>>; + /// A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and /// ready to import into the `BeaconChain`. The validation includes: /// @@ -569,42 +613,42 @@ pub struct SignatureVerifiedBlock { /// - State root check /// - Per block processing /// -/// Note: a `FullyVerifiedBlock` is not _forever_ valid to be imported, it may later become invalid -/// due to finality or some other event. A `FullyVerifiedBlock` should be imported into the +/// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid +/// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the /// `BeaconChain` immediately after it is instantiated. 
-pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> { - pub block: SignedBeaconBlock, +pub struct ExecutionPendingBlock { + pub block: Arc>, pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, - pub confirmation_db_batch: Vec>, - pub payload_verification_status: PayloadVerificationStatus, + pub confirmed_state_roots: Vec, + pub payload_verification_handle: PayloadVerificationHandle, } -/// Implemented on types that can be converted into a `FullyVerifiedBlock`. +/// Implemented on types that can be converted into a `ExecutionPendingBlock`. /// /// Used to allow functions to accept blocks at various stages of verification. -pub trait IntoFullyVerifiedBlock: Sized { - fn into_fully_verified_block( +pub trait IntoExecutionPendingBlock: Sized { + fn into_execution_pending_block( self, chain: &Arc>, - ) -> Result, BlockError> { - self.into_fully_verified_block_slashable(chain) - .map(|fully_verified| { + ) -> Result, BlockError> { + self.into_execution_pending_block_slashable(chain) + .map(|execution_pending| { // Supply valid block to slasher. if let Some(slasher) = chain.slasher.as_ref() { - slasher.accept_block_header(fully_verified.block.signed_block_header()); + slasher.accept_block_header(execution_pending.block.signed_block_header()); } - fully_verified + execution_pending }) .map_err(|slash_info| process_block_slash_info(chain, slash_info)) } /// Convert the block to fully-verified form while producing data to aid checking slashability. - fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>>; + ) -> Result, BlockSlashInfo>>; fn block(&self) -> &SignedBeaconBlock; } @@ -615,7 +659,7 @@ impl GossipVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. 
pub fn new( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result> { // If the block is valid for gossip we don't supply it to the slasher here because @@ -630,7 +674,7 @@ impl GossipVerifiedBlock { /// As for new, but doesn't pass the block to the slasher. fn new_without_slasher_checks( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. @@ -665,7 +709,11 @@ impl GossipVerifiedBlock { // reboot if the `observed_block_producers` cache is empty. In that case, without this // check, we will load the parent and state from disk only to find out later that we // already know this block. - if chain.fork_choice.read().contains_block(&block_root) { + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { return Err(BlockError::BlockIsAlreadyKnown); } @@ -685,10 +733,10 @@ impl GossipVerifiedBlock { // Do not process a block that doesn't descend from the finalized root. // // We check this *before* we load the parent so that we can return a more detailed error. - let block = check_block_is_finalized_descendant::( - block, - &chain.fork_choice.read(), - &chain.store, + check_block_is_finalized_descendant( + chain, + &chain.canonical_head.fork_choice_write_lock(), + &block, )?; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -840,15 +888,15 @@ impl GossipVerifiedBlock { } } -impl IntoFullyVerifiedBlock for GossipVerifiedBlock { +impl IntoExecutionPendingBlock for GossipVerifiedBlock { /// Completes verification of the wrapped `block`. 
- fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { - let fully_verified = + ) -> Result, BlockSlashInfo>> { + let execution_pending = SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?; - fully_verified.into_fully_verified_block_slashable(chain) + execution_pending.into_execution_pending_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { @@ -862,7 +910,7 @@ impl SignatureVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn new( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, chain: &BeaconChain, ) -> Result> { @@ -907,7 +955,7 @@ impl SignatureVerifiedBlock { /// As for `new` above but producing `BlockSlashInfo`. pub fn check_slashable( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, chain: &BeaconChain, ) -> Result>> { @@ -963,12 +1011,12 @@ impl SignatureVerifiedBlock { } } -impl IntoFullyVerifiedBlock for SignatureVerifiedBlock { +impl IntoExecutionPendingBlock for SignatureVerifiedBlock { /// Completes verification of the wrapped `block`. - fn into_fully_verified_block_slashable( + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo>> { let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) @@ -977,7 +1025,7 @@ impl IntoFullyVerifiedBlock for SignatureVerifiedBlock IntoFullyVerifiedBlock for SignatureVerifiedBlock IntoFullyVerifiedBlock for SignedBeaconBlock { +impl IntoExecutionPendingBlock for Arc> { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` - /// and then using that implementation of `IntoFullyVerifiedBlock` to complete verification. 
- fn into_fully_verified_block_slashable( + /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. + fn into_execution_pending_block_slashable( self, chain: &Arc>, - ) -> Result, BlockSlashInfo>> { + ) -> Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. let block_root = check_block_relevancy(&self, None, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? - .into_fully_verified_block_slashable(chain) + .into_execution_pending_block_slashable(chain) } fn block(&self) -> &SignedBeaconBlock { @@ -1012,7 +1060,7 @@ impl IntoFullyVerifiedBlock for SignedBeaconBlock FullyVerifiedBlock<'a, T> { +impl ExecutionPendingBlock { /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See /// the struct-level documentation for more information. /// @@ -1021,13 +1069,17 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn from_signature_verified_components( - block: SignedBeaconBlock, + block: Arc>, block_root: Hash256, parent: PreProcessingSnapshot, mut consensus_context: ConsensusContext, chain: &Arc>, ) -> Result> { - if let Some(parent) = chain.fork_choice.read().get_block(&block.parent_root()) { + if let Some(parent) = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block.parent_root()) + { // Reject any block where the parent has an invalid payload. It's impossible for a valid // block to descend from an invalid parent. if parent.execution_status.is_invalid() { @@ -1046,7 +1098,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // because it will revert finalization. 
Note that the finalized block is stored in fork // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). - return Err(BlockError::ParentUnknown(Box::new(block))); + return Err(BlockError::ParentUnknown(block)); } // Reject any block that exceeds our limit on skipped slots. @@ -1066,7 +1118,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // Stage a batch of operations to be completed atomically if this block is imported // successfully. - let mut confirmation_db_batch = vec![]; + let mut confirmed_state_roots = vec![]; // The block must have a higher slot than its parent. if block.slot() <= parent.beacon_block.slot() { @@ -1119,7 +1171,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { .do_atomically(vec![StoreOp::PutState(state_root, &state)])?; drop(txn_lock); - confirmation_db_batch.push(StoreOp::DeleteStateTemporaryFlag(state_root)); + confirmed_state_roots.push(state_root); state_root }; @@ -1138,59 +1190,82 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { } } - // If this block triggers the merge, check to ensure that it references valid execution - // blocks. - // - // The specification defines this check inside `on_block` in the fork-choice specification, - // however we perform the check here for two reasons: - // - // - There's no point in importing a block that will fail fork choice, so it's best to fail - // early. - // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no - // calls to remote servers. - let valid_merge_transition_block = - if is_merge_transition_block(&state, block.message().body()) { - validate_merge_block(chain, block.message())?; - true - } else { - false + let block_slot = block.slot(); + let state_current_epoch = state.current_epoch(); + + // Define a future that will verify the execution payload with an execution engine (but + // don't execute it yet). 
+ let payload_notifier = PayloadNotifier::new(chain.clone(), block.clone(), &state)?; + let is_valid_merge_transition_block = + is_merge_transition_block(&state, block.message().body()); + let payload_verification_future = async move { + let chain = payload_notifier.chain.clone(); + let block = payload_notifier.block.clone(); + + // If this block triggers the merge, check to ensure that it references valid execution + // blocks. + // + // The specification defines this check inside `on_block` in the fork-choice specification, + // however we perform the check here for two reasons: + // + // - There's no point in importing a block that will fail fork choice, so it's best to fail + // early. + // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no + // calls to remote servers. + if is_valid_merge_transition_block { + validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?; }; - // The specification declares that this should be run *inside* `per_block_processing`, - // however we run it here to keep `per_block_processing` pure (i.e., no calls to external - // servers). - // - // It is important that this function is called *after* `per_slot_processing`, since the - // `randao` may change. - let payload_verification_status = notify_new_payload(chain, &state, block.message())?; + // The specification declares that this should be run *inside* `per_block_processing`, + // however we run it here to keep `per_block_processing` pure (i.e., no calls to external + // servers). + // + // It is important that this function is called *after* `per_slot_processing`, since the + // `randao` may change. + let payload_verification_status = payload_notifier.notify_new_payload().await?; - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. 
- if payload_verification_status.is_optimistic() { - let current_slot = chain - .slot_clock - .now() - .ok_or(BeaconChainError::UnableToReadSlot)?; + // If the payload did not validate or invalidate the block, check to see if this block is + // valid for optimistic import. + if payload_verification_status.is_optimistic() { + let block_hash_opt = block + .message() + .body() + .execution_payload() + .map(|full_payload| full_payload.execution_payload.block_hash); - if !chain - .fork_choice - .read() - .is_optimistic_candidate_block( - current_slot, - block.slot(), - &block.parent_root(), - &chain.spec, - ) - .map_err(BeaconChainError::from)? - { - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + // Ensure the block is a candidate for optimistic import. + if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? + { + warn!( + chain.log, + "Rejecting optimistic block"; + "block_hash" => ?block_hash_opt, + "msg" => "the execution engine is not synced" + ); + return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); + } } - } + + Ok(PayloadVerificationOutcome { + payload_verification_status, + is_valid_merge_transition_block, + }) + }; + // Spawn the payload verification future as a new task, but don't wait for it to complete. + // The `payload_verification_future` will be awaited later to ensure verification completed + // successfully. + let payload_verification_handle = chain + .task_executor + .spawn_handle( + payload_verification_future, + "execution_payload_verification", + ) + .ok_or(BeaconChainError::RuntimeShutdown)?; // If the block is sufficiently recent, notify the validator monitor. 
if let Some(slot) = chain.slot_clock.now() { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); - if block.slot().epoch(T::EthSpec::slots_per_epoch()) + if block_slot.epoch(T::EthSpec::slots_per_epoch()) + VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 >= epoch { @@ -1199,7 +1274,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { // the `validator_monitor` lock from being bounced or held for a long time whilst // performing `per_slot_processing`. for (i, summary) in summaries.iter().enumerate() { - let epoch = state.current_epoch() - Epoch::from(summaries.len() - i); + let epoch = state_current_epoch - Epoch::from(summaries.len() - i); if let Err(e) = validator_monitor.process_validator_statuses(epoch, summary, &chain.spec) { @@ -1231,8 +1306,14 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { */ if let Some(ref event_handler) = chain.event_handler { if event_handler.has_block_reward_subscribers() { - let block_reward = - chain.compute_block_reward(block.message(), block_root, &state)?; + let mut reward_cache = Default::default(); + let block_reward = chain.compute_block_reward( + block.message(), + block_root, + &state, + &mut reward_cache, + true, + )?; event_handler.register(EventKind::BlockReward(block_reward)); } } @@ -1297,21 +1378,13 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> { }); } - if valid_merge_transition_block { - info!(chain.log, "{}", POS_PANDA_BANNER); - info!(chain.log, "Proof of Stake Activated"; "slot" => block.slot()); - info!(chain.log, ""; "Terminal POW Block Hash" => ?block.message().execution_payload()?.parent_hash().into_root()); - info!(chain.log, ""; "Merge Transition Block Root" => ?block.message().tree_hash_root()); - info!(chain.log, ""; "Merge Transition Execution Hash" => ?block.message().execution_payload()?.block_hash().into_root()); - } - Ok(Self { block, block_root, state, parent_block: parent.beacon_block, - confirmation_db_batch, - payload_verification_status, + confirmed_state_roots, + 
payload_verification_handle, }) } } @@ -1362,9 +1435,14 @@ fn check_block_against_finalized_slot( block_root: Hash256, chain: &BeaconChain, ) -> Result<(), BlockError> { + // The finalized checkpoint is being read from fork choice, rather than the cached head. + // + // Fork choice has the most up-to-date view of finalization and there's no point importing a + // block which conflicts with the fork-choice view of finalization. let finalized_slot = chain - .head_info()? - .finalized_checkpoint + .canonical_head + .cached_head() + .finalized_checkpoint() .epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -1380,13 +1458,17 @@ fn check_block_against_finalized_slot( } /// Returns `Ok(block)` if the block descends from the finalized root. -pub fn check_block_is_finalized_descendant>( - block: SignedBeaconBlock, - fork_choice: &ForkChoice, - store: &HotColdDB, -) -> Result, BlockError> { +/// +/// ## Warning +/// +/// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. +pub fn check_block_is_finalized_descendant( + chain: &BeaconChain, + fork_choice: &BeaconForkChoice, + block: &Arc>, +) -> Result<(), BlockError> { if fork_choice.is_descendant_of_finalized(block.parent_root()) { - Ok(block) + Ok(()) } else { // If fork choice does *not* consider the parent to be a descendant of the finalized block, // then there are two more cases: @@ -1396,7 +1478,8 @@ pub fn check_block_is_finalized_descendant( // Check if the block is already known. We know it is post-finalization, so it is // sufficient to check the fork choice. 
- if chain.fork_choice.read().contains_block(&block_root) { + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { return Err(BlockError::BlockIsAlreadyKnown); } @@ -1474,16 +1561,16 @@ pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { #[allow(clippy::type_complexity)] fn verify_parent_block_is_known( chain: &BeaconChain, - block: SignedBeaconBlock, -) -> Result<(ProtoBlock, SignedBeaconBlock), BlockError> { + block: Arc>, +) -> Result<(ProtoBlock, Arc>), BlockError> { if let Some(proto_block) = chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&block.message().parent_root()) { Ok((proto_block, block)) } else { - Err(BlockError::ParentUnknown(Box::new(block))) + Err(BlockError::ParentUnknown(block)) } } @@ -1493,12 +1580,12 @@ fn verify_parent_block_is_known( /// whilst attempting the operation. #[allow(clippy::type_complexity)] fn load_parent( - block: SignedBeaconBlock, + block: Arc>, chain: &BeaconChain, ) -> Result< ( PreProcessingSnapshot, - SignedBeaconBlock, + Arc>, ), BlockError, > { @@ -1513,11 +1600,11 @@ fn load_parent( // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). if !chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .contains_block(&block.parent_root()) { - return Err(BlockError::ParentUnknown(Box::new(block))); + return Err(BlockError::ParentUnknown(block)); } let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); @@ -1603,6 +1690,9 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( let block_epoch = block_slot.epoch(E::slots_per_epoch()); if state.current_epoch() == block_epoch { + // Build both the current and previous epoch caches, as the previous epoch caches are + // useful for verifying attestations in blocks from the current epoch. 
+ state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; Ok(Cow::Borrowed(state)) @@ -1620,6 +1710,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( partial_state_advance(&mut state, state_root_opt, target_slot, spec) .map_err(|e| BlockError::BeaconChainError(BeaconChainError::from(e)))?; + state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; Ok(Cow::Owned(state)) @@ -1683,19 +1774,12 @@ fn verify_header_signature( .get(header.message.proposer_index as usize) .cloned() .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; - let (fork, genesis_validators_root) = chain - .with_head(|head| { - Ok(( - head.beacon_state.fork(), - head.beacon_state.genesis_validators_root(), - )) - }) - .map_err(|e: BlockError| e)?; + let head_fork = chain.canonical_head.cached_head().head_fork(); if header.verify_signature::( &proposer_pubkey, - &fork, - genesis_validators_root, + &head_fork, + chain.genesis_validators_root, &chain.spec, ) { Ok(()) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 58f1063fdb..646c3840fe 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; @@ -16,7 +16,7 @@ use crate::{ }; use eth1::Config as Eth1Config; use execution_layer::ExecutionLayer; -use fork_choice::ForkChoice; +use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use operation_pool::{OperationPool, 
PersistedOperationPool}; use parking_lot::RwLock; @@ -76,7 +76,7 @@ pub struct BeaconChainBuilder { >, op_pool: Option>, eth1_chain: Option>, - execution_layer: Option, + execution_layer: Option>, event_handler: Option>, slot_clock: Option, shutdown_sender: Option>, @@ -244,6 +244,12 @@ where let fork_choice = BeaconChain::>::load_fork_choice( store.clone(), + ResetPayloadStatuses::always_reset_conditionally( + self.chain_config.always_reset_payload_statuses, + ), + self.chain_config.count_unrealized_full, + &self.spec, + log, ) .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? .ok_or("Fork choice not found in store")?; @@ -340,7 +346,7 @@ where Ok(( BeaconSnapshot { beacon_block_root, - beacon_block, + beacon_block: Arc::new(beacon_block), beacon_state, }, self, @@ -355,12 +361,16 @@ where self = updated_builder; let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis); + let current_slot = None; let fork_choice = ForkChoice::from_anchor( fc_store, genesis.beacon_block_root, &genesis.beacon_block, &genesis.beacon_state, + current_slot, + self.chain_config.count_unrealized_full, + &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -402,6 +412,12 @@ where )); } + // Prime all caches before storing the state in the database and computing the tree hash + // root. 
+ weak_subj_state + .build_all_caches(&self.spec) + .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?; + let computed_state_root = weak_subj_state .update_tree_hash_cache() .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?; @@ -478,17 +494,21 @@ where let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, - beacon_block: weak_subj_block, + beacon_block: Arc::new(weak_subj_block), beacon_state: weak_subj_state, }; let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot); + let current_slot = Some(snapshot.beacon_block.slot()); let fork_choice = ForkChoice::from_anchor( fc_store, snapshot.beacon_block_root, &snapshot.beacon_block, &snapshot.beacon_state, + current_slot, + self.chain_config.count_unrealized_full, + &self.spec, ) .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; @@ -504,7 +524,7 @@ where } /// Sets the `BeaconChain` execution layer. - pub fn execution_layer(mut self, execution_layer: Option) -> Self { + pub fn execution_layer(mut self, execution_layer: Option>) -> Self { self.execution_layer = execution_layer; self } @@ -661,17 +681,20 @@ where head_block_root, &head_state, store.clone(), + Some(current_slot), &self.spec, + self.chain_config.count_unrealized.into(), + self.chain_config.count_unrealized_full, )?; } - let mut canonical_head = BeaconSnapshot { + let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, - beacon_block: head_block, + beacon_block: Arc::new(head_block), beacon_state: head_state, }; - canonical_head + head_snapshot .beacon_state .build_all_caches(&self.spec) .map_err(|e| format!("Failed to build state caches: {:?}", e))?; @@ -681,25 +704,17 @@ where // // This is a sanity check to detect database corruption. 
let fc_finalized = fork_choice.finalized_checkpoint(); - let head_finalized = canonical_head.beacon_state.finalized_checkpoint(); - if fc_finalized != head_finalized { - let is_genesis = head_finalized.root.is_zero() - && head_finalized.epoch == fc_finalized.epoch - && fc_finalized.root == genesis_block_root; - let is_wss = store.get_anchor_slot().map_or(false, |anchor_slot| { - fc_finalized.epoch == anchor_slot.epoch(TEthSpec::slots_per_epoch()) - }); - if !is_genesis && !is_wss { - return Err(format!( - "Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \ + let head_finalized = head_snapshot.beacon_state.finalized_checkpoint(); + if fc_finalized.epoch < head_finalized.epoch { + return Err(format!( + "Database corrupt: fork choice is finalized at {:?} whilst head is finalized at \ {:?}", - fc_finalized, head_finalized - )); - } + fc_finalized, head_finalized + )); } let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| { - ValidatorPubkeyCache::new(&canonical_head.beacon_state, store.clone()) + ValidatorPubkeyCache::new(&head_snapshot.beacon_state, store.clone()) .map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e)) })?; @@ -714,7 +729,7 @@ where if let Some(slot) = slot_clock.now() { validator_monitor.process_valid_state( slot.epoch(TEthSpec::slots_per_epoch()), - &canonical_head.beacon_state, + &head_snapshot.beacon_state, ); } @@ -748,10 +763,17 @@ where .do_atomically(self.pending_io_batch) .map_err(|e| format!("Error writing chain & metadata to disk: {:?}", e))?; + let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); + let genesis_time = head_snapshot.beacon_state.genesis_time(); + let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); + let beacon_chain = BeaconChain { spec: self.spec, config: self.chain_config, store, + task_executor: self + .task_executor + .ok_or("Cannot build without task executor")?, store_migrator, 
slot_clock, op_pool: self.op_pool.ok_or("Cannot build without op pool")?, @@ -781,11 +803,11 @@ where observed_attester_slashings: <_>::default(), eth1_chain: self.eth1_chain, execution_layer: self.execution_layer, - genesis_validators_root: canonical_head.beacon_state.genesis_validators_root(), - canonical_head: TimeoutRwLock::new(canonical_head.clone()), + genesis_validators_root, + genesis_time, + canonical_head, genesis_block_root, genesis_state_root, - fork_choice: RwLock::new(fork_choice), fork_choice_signal_tx, fork_choice_signal_rx, event_handler: self.event_handler, @@ -806,9 +828,7 @@ where validator_monitor: RwLock::new(validator_monitor), }; - let head = beacon_chain - .head() - .map_err(|e| format!("Failed to get head: {:?}", e))?; + let head = beacon_chain.head_snapshot(); // Prime the attester cache with the head state. beacon_chain @@ -1011,10 +1031,10 @@ mod test { .build() .expect("should build"); - let head = chain.head().expect("should get head"); + let head = chain.head_snapshot(); - let state = head.beacon_state; - let block = head.beacon_block; + let state = &head.beacon_state; + let block = &head.beacon_block; assert_eq!(state.slot(), Slot::new(0), "should start from genesis"); assert_eq!( @@ -1033,7 +1053,7 @@ mod test { .get_blinded_block(&Hash256::zero()) .expect("should read db") .expect("should find genesis block"), - block.clone().into(), + block.clone_as_blinded(), "should store genesis block under zero hash alias" ); assert_eq!( diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs new file mode 100644 index 0000000000..e6e99db03c --- /dev/null +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -0,0 +1,1334 @@ +//! This module provides all functionality for finding the canonical head, updating all necessary +//! components (e.g. caches) and maintaining a cached head block and state. +//! +//! For practically all applications, the "canonical head" can be read using +//! 
`beacon_chain.canonical_head.cached_head()`. +//! +//! The canonical head can be updated using `beacon_chain.recompute_head()`. +//! +//! ## Deadlock safety +//! +//! This module contains three locks: +//! +//! 1. `RwLock`: Contains `proto_array` fork choice. +//! 2. `RwLock`: Contains a cached block/state from the last run of `proto_array`. +//! 3. `Mutex<()>`: Is used to prevent concurrent execution of `BeaconChain::recompute_head`. +//! +//! This module has to take great efforts to avoid causing a deadlock with these three methods. Any +//! developers working in this module should tread carefully and seek a detailed review. +//! +//! To encourage safe use of this module, it should **only ever return a read or write lock for the +//! fork choice lock (lock 1)**. Whilst public functions might indirectly utilise locks (2) and (3), +//! the fundamental `RwLockWriteGuard` or `RwLockReadGuard` should never be exposed. This prevents +//! external functions from acquiring these locks in conflicting orders and causing a deadlock. +//! +//! ## Design Considerations +//! +//! We separate the `BeaconForkChoice` and `CachedHead` into two `RwLocks` because we want to ensure +//! fast access to the `CachedHead`. If we were to put them both under the same lock, we would need +//! to take an exclusive write-lock on it in order to run `ForkChoice::get_head`. This can take tens +//! of milliseconds and would block all downstream functions that want to know simple things like +//! the head block root. This is unacceptable for fast-responding functions like the networking +//! stack. 
+ +use crate::persisted_fork_choice::PersistedForkChoice; +use crate::{ + beacon_chain::{BeaconForkChoice, BeaconStore, FORK_CHOICE_DB_KEY}, + block_times_cache::BlockTimesCache, + events::ServerSentEventHandler, + metrics, + validator_monitor::{get_slot_delay_ms, timestamp_now}, + BeaconChain, BeaconChainError as Error, BeaconChainTypes, BeaconSnapshot, +}; +use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead}; +use fork_choice::{ + CountUnrealizedFull, ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, + ResetPayloadStatuses, +}; +use itertools::process_results; +use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use slog::{crit, debug, error, warn, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::Duration; +use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem}; +use task_executor::{JoinHandle, ShutdownReason}; +use types::*; + +/// Simple wrapper around `RwLock` that uses private visibility to prevent any other modules from +/// accessing the contained lock without it being explicitly noted in this module. +pub struct CanonicalHeadRwLock(RwLock); + +impl From> for CanonicalHeadRwLock { + fn from(rw_lock: RwLock) -> Self { + Self(rw_lock) + } +} + +impl CanonicalHeadRwLock { + fn new(item: T) -> Self { + Self::from(RwLock::new(item)) + } + + fn read(&self) -> RwLockReadGuard { + self.0.read() + } + + fn write(&self) -> RwLockWriteGuard { + self.0.write() + } +} + +/// Provides a series of cached values from the last time `BeaconChain::recompute_head` was run. +/// +/// This struct is designed to be cheap-to-clone, any large fields should be wrapped in an `Arc` (or +/// similar). +#[derive(Clone)] +pub struct CachedHead { + /// Provides the head block and state from the last time the head was updated. + pub snapshot: Arc>, + /// The justified checkpoint as per `self.fork_choice`. 
+ /// + /// This value may be distinct to the `self.snapshot.beacon_state.justified_checkpoint`. + /// This value should be used over the beacon state value in practically all circumstances. + justified_checkpoint: Checkpoint, + /// The finalized checkpoint as per `self.fork_choice`. + /// + /// This value may be distinct to the `self.snapshot.beacon_state.finalized_checkpoint`. + /// This value should be used over the beacon state value in practically all circumstances. + finalized_checkpoint: Checkpoint, + /// The `execution_payload.block_hash` of the block at the head of the chain. Set to `None` + /// before Bellatrix. + head_hash: Option, + /// The `execution_payload.block_hash` of the justified block. Set to `None` before Bellatrix. + justified_hash: Option, + /// The `execution_payload.block_hash` of the finalized block. Set to `None` before Bellatrix. + finalized_hash: Option, +} + +impl CachedHead { + /// Returns root of the block at the head of the beacon chain. + pub fn head_block_root(&self) -> Hash256 { + self.snapshot.beacon_block_root + } + + /// Returns root of the `BeaconState` at the head of the beacon chain. + /// + /// ## Note + /// + /// This `BeaconState` has *not* been advanced to the current slot, it has the same slot as the + /// head block. + pub fn head_state_root(&self) -> Hash256 { + self.snapshot.beacon_state_root() + } + + /// Returns slot of the block at the head of the beacon chain. + /// + /// ## Notes + /// + /// This is *not* the current slot as per the system clock. Use `BeaconChain::slot` for the + /// system clock (aka "wall clock") slot. + pub fn head_slot(&self) -> Slot { + self.snapshot.beacon_block.slot() + } + + /// Returns the `Fork` from the `BeaconState` at the head of the chain. + pub fn head_fork(&self) -> Fork { + self.snapshot.beacon_state.fork() + } + + /// Returns the randao mix for the block at the head of the chain. 
+ pub fn head_random(&self) -> Result { + let state = &self.snapshot.beacon_state; + let root = *state.get_randao_mix(state.current_epoch())?; + Ok(root) + } + + /// Returns the active validator count for the current epoch of the head state. + /// + /// Should only return `None` if the caches have not been built on the head state (this should + /// never happen). + pub fn active_validator_count(&self) -> Option { + self.snapshot + .beacon_state + .get_cached_active_validator_indices(RelativeEpoch::Current) + .map(|indices| indices.len()) + .ok() + } + + /// Returns the finalized checkpoint, as determined by fork choice. + /// + /// ## Note + /// + /// This is *not* the finalized checkpoint of the `head_snapshot.beacon_state`, rather it is the + /// best finalized checkpoint that has been observed by `self.fork_choice`. It is possible that + /// the `head_snapshot.beacon_state` finalized value is earlier than the one returned here. + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.finalized_checkpoint + } + + /// Returns the justified checkpoint, as determined by fork choice. + /// + /// ## Note + /// + /// This is *not* the "current justified checkpoint" of the `head_snapshot.beacon_state`, rather + /// it is the justified checkpoint in the view of `self.fork_choice`. It is possible that the + /// `head_snapshot.beacon_state` justified value is different to, but not conflicting with, the + /// one returned here. + pub fn justified_checkpoint(&self) -> Checkpoint { + self.justified_checkpoint + } + + /// Returns the cached values of `ForkChoice::forkchoice_update_parameters`. + /// + /// Useful for supplying to the execution layer. 
+ pub fn forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters { + ForkchoiceUpdateParameters { + head_root: self.snapshot.beacon_block_root, + head_hash: self.head_hash, + justified_hash: self.justified_hash, + finalized_hash: self.finalized_hash, + } + } +} + +/// Represents the "canonical head" of the beacon chain. +/// +/// The `cached_head` is elected by the `fork_choice` algorithm contained in this struct. +/// +/// There is no guarantee that the state of the `fork_choice` struct will always represent the +/// `cached_head` (i.e. we may call `fork_choice` *without* updating the cached values), however +/// there is a guarantee that the `cached_head` represents some past state of `fork_choice` (i.e. +/// `fork_choice` never lags *behind* the `cached_head`). +pub struct CanonicalHead { + /// Provides an in-memory representation of the non-finalized block tree and is used to run the + /// fork choice algorithm and determine the canonical head. + pub fork_choice: CanonicalHeadRwLock>, + /// Provides values cached from a previous execution of `self.fork_choice.get_head`. + /// + /// Although `self.fork_choice` might be slightly more advanced that this value, it is safe to + /// consider that these values represent the "canonical head" of the beacon chain. + pub cached_head: CanonicalHeadRwLock>, + /// A lock used to prevent concurrent runs of `BeaconChain::recompute_head`. + /// + /// This lock **should not be made public**, it should only be used inside this module. + recompute_head_lock: Mutex<()>, +} + +impl CanonicalHead { + /// Instantiate `Self`. 
+ pub fn new( + fork_choice: BeaconForkChoice, + snapshot: Arc>, + ) -> Self { + let fork_choice_view = fork_choice.cached_fork_choice_view(); + let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters(); + let cached_head = CachedHead { + snapshot, + justified_checkpoint: fork_choice_view.justified_checkpoint, + finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_hash: forkchoice_update_params.head_hash, + justified_hash: forkchoice_update_params.justified_hash, + finalized_hash: forkchoice_update_params.finalized_hash, + }; + + Self { + fork_choice: CanonicalHeadRwLock::new(fork_choice), + cached_head: CanonicalHeadRwLock::new(cached_head), + recompute_head_lock: Mutex::new(()), + } + } + + /// Load a persisted version of `BeaconForkChoice` from the `store` and restore `self` to that + /// state. + /// + /// This is useful if some database corruption is expected and we wish to go back to our last + /// save-point. + pub(crate) fn restore_from_store( + &self, + // We don't actually need this value, however it's always present when we call this function + // and it needs to be dropped to prevent a dead-lock. Requiring it to be passed here is + // defensive programming. + mut fork_choice_write_lock: RwLockWriteGuard>, + reset_payload_statuses: ResetPayloadStatuses, + count_unrealized_full: CountUnrealizedFull, + store: &BeaconStore, + spec: &ChainSpec, + log: &Logger, + ) -> Result<(), Error> { + let fork_choice = >::load_fork_choice( + store.clone(), + reset_payload_statuses, + count_unrealized_full, + spec, + log, + )? + .ok_or(Error::MissingPersistedForkChoice)?; + let fork_choice_view = fork_choice.cached_fork_choice_view(); + let beacon_block_root = fork_choice_view.head_block_root; + let beacon_block = store + .get_full_block(&beacon_block_root)? 
+ .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; + let beacon_state_root = beacon_block.state_root(); + let beacon_state = store + .get_state(&beacon_state_root, Some(beacon_block.slot()))? + .ok_or(Error::MissingBeaconState(beacon_state_root))?; + + let snapshot = BeaconSnapshot { + beacon_block_root, + beacon_block: Arc::new(beacon_block), + beacon_state, + }; + + let forkchoice_update_params = fork_choice.get_forkchoice_update_parameters(); + let cached_head = CachedHead { + snapshot: Arc::new(snapshot), + justified_checkpoint: fork_choice_view.justified_checkpoint, + finalized_checkpoint: fork_choice_view.finalized_checkpoint, + head_hash: forkchoice_update_params.head_hash, + justified_hash: forkchoice_update_params.justified_hash, + finalized_hash: forkchoice_update_params.finalized_hash, + }; + + *fork_choice_write_lock = fork_choice; + // Avoid interleaving the fork choice and cached head locks. + drop(fork_choice_write_lock); + *self.cached_head.write() = cached_head; + + Ok(()) + } + + /// Returns the execution status of the block at the head of the beacon chain. + /// + /// This will only return `Err` in the scenario where `self.fork_choice` has advanced + /// significantly past the cached `head_snapshot`. In such a scenario it is likely prudent to + /// run `BeaconChain::recompute_head` to update the cached values. + pub fn head_execution_status(&self) -> Result { + let head_block_root = self.cached_head().head_block_root(); + self.fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::HeadMissingFromForkChoice(head_block_root)) + } + + /// Returns a clone of the `CachedHead` and the execution status of the contained head block. + /// + /// This will only return `Err` in the scenario where `self.fork_choice` has advanced + /// significantly past the cached `head_snapshot`. In such a scenario it is likely prudent to + /// run `BeaconChain::recompute_head` to update the cached values. 
+ pub fn head_and_execution_status( + &self, + ) -> Result<(CachedHead, ExecutionStatus), Error> { + let head = self.cached_head(); + let head_block_root = head.head_block_root(); + let execution_status = self + .fork_choice_read_lock() + .get_block_execution_status(&head_block_root) + .ok_or(Error::HeadMissingFromForkChoice(head_block_root))?; + Ok((head, execution_status)) + } + + /// Returns a clone of `self.cached_head`. + /// + /// Takes a read-lock on `self.cached_head` for a short time (just long enough to clone it). + /// The `CachedHead` is designed to be fast-to-clone so this is preferred to passing back a + /// `RwLockReadGuard`, which may cause deadlock issues (see module-level documentation). + /// + /// This function is safe to be public since it does not expose any locks. + pub fn cached_head(&self) -> CachedHead { + self.cached_head_read_lock().clone() + } + + /// Access a read-lock for the cached head. + /// + /// This function is **not safe** to be public. See the module-level documentation for more + /// information about protecting from deadlocks. + fn cached_head_read_lock(&self) -> RwLockReadGuard> { + self.cached_head.read() + } + + /// Access a write-lock for the cached head. + /// + /// This function is **not safe** to be public. See the module-level documentation for more + /// information about protecting from deadlocks. + fn cached_head_write_lock(&self) -> RwLockWriteGuard> { + self.cached_head.write() + } + + /// Access a read-lock for fork choice. + pub fn fork_choice_read_lock(&self) -> RwLockReadGuard> { + self.fork_choice.read() + } + + /// Access a write-lock for fork choice. + pub fn fork_choice_write_lock(&self) -> RwLockWriteGuard> { + self.fork_choice.write() + } +} + +impl BeaconChain { + /// Contains the "best block"; the head of the canonical `BeaconChain`. + /// + /// It is important to note that the `snapshot.beacon_state` returned may not match the present slot. 
It + /// is the state as it was when the head block was received, which could be some slots prior to + /// now. + pub fn head(&self) -> CachedHead { + self.canonical_head.cached_head() + } + + /// Apply a function to an `Arc`-clone of the canonical head snapshot. + /// + /// This method is a relic from an old implementation where the canonical head was not behind + /// an `Arc` and the canonical head lock had to be held whenever it was read. This method is + /// fine to be left here, it just seems a bit weird. + pub fn with_head( + &self, + f: impl FnOnce(&BeaconSnapshot) -> Result, + ) -> Result + where + E: From, + { + let head_snapshot = self.head_snapshot(); + f(&head_snapshot) + } + + /// Returns the beacon block root at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block_root(&self) -> Hash256 { + self.canonical_head + .cached_head_read_lock() + .snapshot + .beacon_block_root + } + + /// Returns the slot of the highest block in the canonical chain. + pub fn best_slot(&self) -> Slot { + self.canonical_head + .cached_head_read_lock() + .snapshot + .beacon_block + .slot() + } + + /// Returns a `Arc` of the `BeaconSnapshot` at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_snapshot(&self) -> Arc> { + self.canonical_head.cached_head_read_lock().snapshot.clone() + } + + /// Returns the beacon block at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block(&self) -> Arc> { + self.canonical_head + .cached_head_read_lock() + .snapshot + .beacon_block + .clone() + } + + /// Returns a clone of the beacon state at the head of the canonical chain. + /// + /// Cloning the head state is expensive and should generally be avoided outside of tests. + /// + /// See `Self::head` for more information. 
+ pub fn head_beacon_state_cloned(&self) -> BeaconState { + // Don't clone whilst holding the read-lock, take an Arc-clone to reduce lock contention. + let snapshot: Arc<_> = self.head_snapshot(); + snapshot.beacon_state.clone() + } + + /// Execute the fork choice algorithm and enthrone the result as the canonical head. + /// + /// This method replaces the old `BeaconChain::fork_choice` method. + pub async fn recompute_head_at_current_slot(self: &Arc) { + match self.slot() { + Ok(current_slot) => self.recompute_head_at_slot(current_slot).await, + Err(e) => error!( + self.log, + "No slot when recomputing head"; + "error" => ?e + ), + } + } + + /// Execute the fork choice algorithm and enthrone the result as the canonical head. + /// + /// The `current_slot` is specified rather than relying on the wall-clock slot. Using a + /// different slot to the wall-clock can be useful for pushing fork choice into the next slot + /// *just* before the start of the slot. This ensures that block production can use the correct + /// head value without being delayed. + /// + /// This function purposefully does *not* return a `Result`. It's possible for fork choice to + /// fail to update if there is only one viable head and it has an invalid execution payload. In + /// such a case it's critical that the `BeaconChain` keeps importing blocks so that the + /// situation can be rectified. We avoid returning an error here so that calling functions + /// can't abort block import because an error is returned here. + pub async fn recompute_head_at_slot(self: &Arc, current_slot: Slot) { + metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); + let _timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); + + let chain = self.clone(); + match self + .spawn_blocking_handle( + move || chain.recompute_head_at_slot_internal(current_slot), + "recompute_head_internal", + ) + .await + { + // Fork choice returned successfully and did not need to update the EL. 
+ Ok(Ok(None)) => (), + // Fork choice returned successfully and needed to update the EL. It has returned a + // join-handle from when it spawned some async tasks. We should await those tasks. + Ok(Ok(Some(join_handle))) => match join_handle.await { + // The async task completed successfully. + Ok(Some(())) => (), + // The async task did not complete successfully since the runtime is shutting down. + Ok(None) => { + debug!( + self.log, + "Did not update EL fork choice"; + "info" => "shutting down" + ); + } + // The async task did not complete successfully, tokio returned an error. + Err(e) => { + error!( + self.log, + "Did not update EL fork choice"; + "error" => ?e + ); + } + }, + // There was an error recomputing the head. + Ok(Err(e)) => { + metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); + error!( + self.log, + "Error whist recomputing head"; + "error" => ?e + ); + } + // There was an error spawning the task. + Err(e) => { + error!( + self.log, + "Failed to spawn recompute head task"; + "error" => ?e + ); + } + } + } + + /// A non-async (blocking) function which recomputes the canonical head and spawns async tasks. + /// + /// This function performs long-running, heavy-lifting tasks which should not be performed on + /// the core `tokio` executor. + fn recompute_head_at_slot_internal( + self: &Arc, + current_slot: Slot, + ) -> Result>>, Error> { + let recompute_head_lock = self.canonical_head.recompute_head_lock.lock(); + + // Take a clone of the current ("old") head. + let old_cached_head = self.canonical_head.cached_head(); + + // Determine the current ("old") fork choice parameters. + // + // It is important to read the `fork_choice_view` from the cached head rather than from fork + // choice, since the fork choice value might have changed between calls to this function. We + // are interested in the changes since we last cached the head values, not since fork choice + // was last run. 
+ let old_view = ForkChoiceView { + head_block_root: old_cached_head.head_block_root(), + justified_checkpoint: old_cached_head.justified_checkpoint(), + finalized_checkpoint: old_cached_head.finalized_checkpoint(), + }; + + let mut fork_choice_write_lock = self.canonical_head.fork_choice_write_lock(); + + // Recompute the current head via the fork choice algorithm. + fork_choice_write_lock.get_head(current_slot, &self.spec)?; + + // Downgrade the fork choice write-lock to a read lock, without allowing access to any + // other writers. + let fork_choice_read_lock = RwLockWriteGuard::downgrade(fork_choice_write_lock); + + // Read the current head value from the fork choice algorithm. + let new_view = fork_choice_read_lock.cached_fork_choice_view(); + + // Check to ensure that the finalized block hasn't been marked as invalid. If it has, + // shut down Lighthouse. + let finalized_proto_block = fork_choice_read_lock.get_finalized_block()?; + check_finalized_payload_validity(self, &finalized_proto_block)?; + + // Sanity check the finalized checkpoint. + // + // The new finalized checkpoint must be either equal to or better than the previous + // finalized checkpoint. + check_against_finality_reversion(&old_view, &new_view)?; + + let new_head_proto_block = fork_choice_read_lock + .get_block(&new_view.head_block_root) + .ok_or(Error::HeadBlockMissingFromForkChoice( + new_view.head_block_root, + ))?; + + // Do not allow an invalid block to become the head. + // + // This check avoids the following infinite loop: + // + // 1. A new block is set as the head. + // 2. The EL is updated with the new head, and returns INVALID. + // 3. We call `process_invalid_execution_payload` and it calls this function. + // 4. This function elects an invalid block as the head. + // 5. GOTO 2 + // + // In theory, fork choice should never select an invalid head (i.e., step #3 is impossible). + // However, this check is cheap. 
+ if new_head_proto_block.execution_status.is_invalid() { + return Err(Error::HeadHasInvalidPayload { + block_root: new_head_proto_block.root, + execution_status: new_head_proto_block.execution_status, + }); + } + + // Exit early if the head or justified/finalized checkpoints have not changed, there's + // nothing to do. + if new_view == old_view { + debug!( + self.log, + "No change in canonical head"; + "head" => ?new_view.head_block_root + ); + return Ok(None); + } + + // Get the parameters to update the execution layer since either the head or some finality + // parameters have changed. + let new_forkchoice_update_parameters = + fork_choice_read_lock.get_forkchoice_update_parameters(); + + perform_debug_logging::(&old_view, &new_view, &fork_choice_read_lock, &self.log); + + // Drop the read lock, it's no longer required and holding it any longer than necessary + // will just cause lock contention. + drop(fork_choice_read_lock); + + // If the head has changed, update `self.canonical_head`. + let new_cached_head = if new_view.head_block_root != old_view.head_block_root { + metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); + + let mut new_snapshot = { + let beacon_block = self + .store + .get_full_block(&new_view.head_block_root)? + .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; + + // FIXME(sproul): use advanced state? + let beacon_state_root = beacon_block.state_root(); + let beacon_state: BeaconState = self + .get_state(&beacon_state_root, Some(beacon_block.slot()))? + .ok_or(Error::MissingBeaconState(beacon_state_root))?; + + BeaconSnapshot { + beacon_block: Arc::new(beacon_block), + beacon_block_root: new_view.head_block_root, + beacon_state, + } + }; + + // Regardless of where we got the state from, attempt to build the committee + // caches. 
+ new_snapshot + .beacon_state + .build_all_committee_caches(&self.spec)?; + + let new_cached_head = CachedHead { + snapshot: Arc::new(new_snapshot), + justified_checkpoint: new_view.justified_checkpoint, + finalized_checkpoint: new_view.finalized_checkpoint, + head_hash: new_forkchoice_update_parameters.head_hash, + justified_hash: new_forkchoice_update_parameters.justified_hash, + finalized_hash: new_forkchoice_update_parameters.finalized_hash, + }; + + let new_head = { + // Now the new snapshot has been obtained, take a write-lock on the cached head so + // we can update it quickly. + let mut cached_head_write_lock = self.canonical_head.cached_head_write_lock(); + // Enshrine the new head as the canonical cached head. + *cached_head_write_lock = new_cached_head; + // Take a clone of the cached head for later use. It is cloned whilst + // holding the write-lock to ensure we get exactly the head we just enshrined. + cached_head_write_lock.clone() + }; + + // Clear the early attester cache in case it conflicts with `self.canonical_head`. + self.early_attester_cache.clear(); + + new_head + } else { + let new_cached_head = CachedHead { + // The head hasn't changed, take a relatively cheap `Arc`-clone of the existing + // head. + snapshot: old_cached_head.snapshot.clone(), + justified_checkpoint: new_view.justified_checkpoint, + finalized_checkpoint: new_view.finalized_checkpoint, + head_hash: new_forkchoice_update_parameters.head_hash, + justified_hash: new_forkchoice_update_parameters.justified_hash, + finalized_hash: new_forkchoice_update_parameters.finalized_hash, + }; + + let mut cached_head_write_lock = self.canonical_head.cached_head_write_lock(); + + // Enshrine the new head as the canonical cached head. Whilst the head block hasn't + // changed, the FFG checkpoints must have changed. + *cached_head_write_lock = new_cached_head; + + // Take a clone of the cached head for later use. 
It is cloned whilst + // holding the write-lock to ensure we get exactly the head we just enshrined. + cached_head_write_lock.clone() + }; + + // Alias for readability. + let new_snapshot = &new_cached_head.snapshot; + let old_snapshot = &old_cached_head.snapshot; + + // If the head changed, perform some updates. + if new_snapshot.beacon_block_root != old_snapshot.beacon_block_root { + if let Err(e) = + self.after_new_head(&old_cached_head, &new_cached_head, new_head_proto_block) + { + crit!( + self.log, + "Error updating canonical head"; + "error" => ?e + ); + } + } + + // Drop the old cache head nice and early to try and free the memory as soon as possible. + drop(old_cached_head); + + // If the finalized checkpoint changed, perform some updates. + // + // The `after_finalization` function will take a write-lock on `fork_choice`, therefore it + // is a dead-lock risk to hold any other lock on fork choice at this point. + if new_view.finalized_checkpoint != old_view.finalized_checkpoint { + if let Err(e) = + self.after_finalization(&new_cached_head, new_view, finalized_proto_block) + { + crit!( + self.log, + "Error updating finalization"; + "error" => ?e + ); + } + } + + // The execution layer updates might attempt to take a write-lock on fork choice, so it's + // important to ensure the fork-choice lock isn't being held. + let el_update_handle = + spawn_execution_layer_updates(self.clone(), new_forkchoice_update_parameters)?; + + // We have completed recomputing the head and it's now valid for another process to do the + // same. + drop(recompute_head_lock); + + Ok(Some(el_update_handle)) + } + + /// Perform updates to caches and other components after the canonical head has been changed. 
+ fn after_new_head( + self: &Arc, + old_cached_head: &CachedHead, + new_cached_head: &CachedHead, + new_head_proto_block: ProtoBlock, + ) -> Result<(), Error> { + let old_snapshot = &old_cached_head.snapshot; + let new_snapshot = &new_cached_head.snapshot; + let new_head_is_optimistic = new_head_proto_block + .execution_status + .is_optimistic_or_invalid(); + + // Detect and potentially report any re-orgs. + let reorg_distance = detect_reorg( + &old_snapshot.beacon_state, + old_snapshot.beacon_block_root, + &new_snapshot.beacon_state, + new_snapshot.beacon_block_root, + &self.spec, + &self.log, + ); + + // Determine if the new head is in a later epoch to the previous head. + let is_epoch_transition = old_snapshot + .beacon_block + .slot() + .epoch(T::EthSpec::slots_per_epoch()) + < new_snapshot + .beacon_state + .slot() + .epoch(T::EthSpec::slots_per_epoch()); + + // These fields are used for server-sent events. + let state_root = new_snapshot.beacon_state_root(); + let head_slot = new_snapshot.beacon_state.slot(); + let dependent_root = new_snapshot + .beacon_state + .proposer_shuffling_decision_root(self.genesis_block_root); + let prev_dependent_root = new_snapshot + .beacon_state + .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); + + observe_head_block_delays( + &mut self.block_times_cache.write(), + &new_head_proto_block, + new_snapshot.beacon_block.message().proposer_index(), + new_snapshot + .beacon_block + .message() + .body() + .graffiti() + .as_utf8_lossy(), + &self.slot_clock, + self.event_handler.as_ref(), + &self.log, + ); + + if is_epoch_transition || reorg_distance.is_some() { + self.persist_head_and_fork_choice()?; + self.op_pool.prune_attestations(self.epoch()?); + } + + // Register server-sent-events for a new head. 
+ if let Some(event_handler) = self + .event_handler + .as_ref() + .filter(|handler| handler.has_head_subscribers()) + { + match (dependent_root, prev_dependent_root) { + (Ok(current_duty_dependent_root), Ok(previous_duty_dependent_root)) => { + event_handler.register(EventKind::Head(SseHead { + slot: head_slot, + block: new_snapshot.beacon_block_root, + state: state_root, + current_duty_dependent_root, + previous_duty_dependent_root, + epoch_transition: is_epoch_transition, + execution_optimistic: new_head_is_optimistic, + })); + } + (Err(e), _) | (_, Err(e)) => { + warn!( + self.log, + "Unable to find dependent roots, cannot register head event"; + "error" => ?e + ); + } + } + } + + // Register a server-sent-event for a reorg (if necessary). + if let Some(depth) = reorg_distance { + if let Some(event_handler) = self + .event_handler + .as_ref() + .filter(|handler| handler.has_reorg_subscribers()) + { + event_handler.register(EventKind::ChainReorg(SseChainReorg { + slot: head_slot, + depth: depth.as_u64(), + old_head_block: old_snapshot.beacon_block_root, + old_head_state: old_snapshot.beacon_state_root(), + new_head_block: new_snapshot.beacon_block_root, + new_head_state: new_snapshot.beacon_state_root(), + epoch: head_slot.epoch(T::EthSpec::slots_per_epoch()), + execution_optimistic: new_head_is_optimistic, + })); + } + } + + Ok(()) + } + + /// Perform updates to caches and other components after the finalized checkpoint has been + /// changed. + /// + /// This function will take a write-lock on `canonical_head.fork_choice`, therefore it would be + /// unwise to hold any lock on fork choice while calling this function. 
+ fn after_finalization( + self: &Arc, + new_cached_head: &CachedHead, + new_view: ForkChoiceView, + finalized_proto_block: ProtoBlock, + ) -> Result<(), Error> { + let new_snapshot = &new_cached_head.snapshot; + let finalized_block_is_optimistic = finalized_proto_block + .execution_status + .is_optimistic_or_invalid(); + + self.op_pool + .prune_all(&new_snapshot.beacon_state, self.epoch()?); + + self.observed_block_producers.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + + self.attester_cache + .prune_below(new_view.finalized_checkpoint.epoch); + + if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_finalized_subscribers() { + event_handler.register(EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { + epoch: new_view.finalized_checkpoint.epoch, + block: new_view.finalized_checkpoint.root, + // Provide the state root of the latest finalized block, rather than the + // specific state root at the first slot of the finalized epoch (which + // might be a skip slot). + state: finalized_proto_block.state_root, + execution_optimistic: finalized_block_is_optimistic, + })); + } + } + + // The store migration task requires the *state at the slot of the finalized epoch*, + // rather than the state of the latest finalized block. These two values will only + // differ when the first slot of the finalized epoch is a skip slot. + // + // Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot` + // to ensure we use the same state that we just set as the head. + let new_finalized_slot = new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let new_finalized_state_root = process_results( + StateRootsIterator::new(&self.store, &new_snapshot.beacon_state), + |mut iter| { + iter.find_map(|(state_root, slot)| { + if slot == new_finalized_slot { + Some(state_root) + } else { + None + } + }) + }, + )? 
+ .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; + + self.store_migrator.process_finalization( + new_finalized_state_root.into(), + new_view.finalized_checkpoint, + self.head_tracker.clone(), + )?; + + // Take a write-lock on the canonical head and signal for it to prune. + self.canonical_head.fork_choice_write_lock().prune()?; + + Ok(()) + } + + /// Return a database operation for writing fork choice to disk. + pub fn persist_fork_choice_in_batch(&self) -> Result { + Self::persist_fork_choice_in_batch_standalone(&self.canonical_head.fork_choice_read_lock()) + } + + /// Return a database operation for writing fork choice to disk. + pub fn persist_fork_choice_in_batch_standalone( + fork_choice: &BeaconForkChoice, + ) -> Result { + let persisted_fork_choice = PersistedForkChoice { + fork_choice: fork_choice.to_persisted(), + fork_choice_store: fork_choice.fc_store().to_persisted(), + }; + persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY) + } +} + +/// Check to see if the `finalized_proto_block` has an invalid execution payload. If so, shut down +/// Lighthouse. +/// +/// ## Notes +/// +/// This function is called whilst holding a write-lock on the `canonical_head`. To ensure dead-lock +/// safety, **do not take any other locks inside this function**. +fn check_finalized_payload_validity( + chain: &BeaconChain, + finalized_proto_block: &ProtoBlock, +) -> Result<(), Error> { + if let ExecutionStatus::Invalid(block_hash) = finalized_proto_block.execution_status { + crit!( + chain.log, + "Finalized block has an invalid payload"; + "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. 
\ + You may be on a hostile network.", + "block_hash" => ?block_hash + ); + let mut shutdown_sender = chain.shutdown_sender(); + shutdown_sender + .try_send(ShutdownReason::Failure( + "Finalized block has an invalid execution payload.", + )) + .map_err(Error::InvalidFinalizedPayloadShutdownError)?; + + // Exit now, the node is in an invalid state. + return Err(Error::InvalidFinalizedPayload { + finalized_root: finalized_proto_block.root, + execution_block_hash: block_hash, + }); + } + + Ok(()) +} + +/// Check to ensure that the transition from `old_view` to `new_view` will not revert finality. +fn check_against_finality_reversion( + old_view: &ForkChoiceView, + new_view: &ForkChoiceView, +) -> Result<(), Error> { + let finalization_equal = new_view.finalized_checkpoint == old_view.finalized_checkpoint; + let finalization_advanced = + new_view.finalized_checkpoint.epoch > old_view.finalized_checkpoint.epoch; + + if finalization_equal || finalization_advanced { + Ok(()) + } else { + Err(Error::RevertedFinalizedEpoch { + old: old_view.finalized_checkpoint, + new: new_view.finalized_checkpoint, + }) + } +} + +fn perform_debug_logging( + old_view: &ForkChoiceView, + new_view: &ForkChoiceView, + fork_choice: &BeaconForkChoice, + log: &Logger, +) { + if new_view.head_block_root != old_view.head_block_root { + debug!( + log, + "Fork choice updated head"; + "new_head_weight" => ?fork_choice + .get_block_weight(&new_view.head_block_root), + "new_head" => ?new_view.head_block_root, + "old_head_weight" => ?fork_choice + .get_block_weight(&old_view.head_block_root), + "old_head" => ?old_view.head_block_root, + ) + } + if new_view.justified_checkpoint != old_view.justified_checkpoint { + debug!( + log, + "Fork choice justified"; + "new_root" => ?new_view.justified_checkpoint.root, + "new_epoch" => new_view.justified_checkpoint.epoch, + "old_root" => ?old_view.justified_checkpoint.root, + "old_epoch" => old_view.justified_checkpoint.epoch, + ) + } + if 
new_view.finalized_checkpoint != old_view.finalized_checkpoint { + debug!( + log, + "Fork choice finalized"; + "new_root" => ?new_view.finalized_checkpoint.root, + "new_epoch" => new_view.finalized_checkpoint.epoch, + "old_root" => ?old_view.finalized_checkpoint.root, + "old_epoch" => old_view.finalized_checkpoint.epoch, + ) + } +} + +fn spawn_execution_layer_updates( + chain: Arc>, + forkchoice_update_params: ForkchoiceUpdateParameters, +) -> Result>, Error> { + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(Error::UnableToReadSlot)?; + + chain + .task_executor + .clone() + .spawn_handle( + async move { + // Avoids raising an error before Bellatrix. + // + // See `Self::prepare_beacon_proposer` for more detail. + if chain.slot_is_prior_to_bellatrix(current_slot + 1) { + return; + } + + if let Err(e) = chain + .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .await + { + crit!( + chain.log, + "Failed to update execution head"; + "error" => ?e + ); + } + + // Update the mechanism for preparing for block production on the execution layer. + // + // Performing this call immediately after `update_execution_engine_forkchoice_blocking` + // might result in two calls to fork choice updated, one *without* payload attributes and + // then a second *with* payload attributes. + // + // This seems OK. It's not a significant waste of EL<>CL bandwidth or resources, as far as I + // know. + if let Err(e) = chain.prepare_beacon_proposer(current_slot).await { + crit!( + chain.log, + "Failed to prepare proposers after fork choice"; + "error" => ?e + ); + } + }, + "update_el_forkchoice", + ) + .ok_or(Error::RuntimeShutdown) +} + +/// Attempt to detect if the new head is not on the same chain as the previous block +/// (i.e., a re-org). +/// +/// Note: this will declare a re-org if we skip `SLOTS_PER_HISTORICAL_ROOT` blocks +/// between calls to fork choice without swapping between chains. 
This seems like an +/// extreme-enough scenario that a warning is fine. +fn detect_reorg( + old_state: &BeaconState, + old_block_root: Hash256, + new_state: &BeaconState, + new_block_root: Hash256, + spec: &ChainSpec, + log: &Logger, +) -> Option { + let is_reorg = new_state + .get_block_root(old_state.slot()) + .map_or(true, |root| *root != old_block_root); + + if is_reorg { + let reorg_distance = + match find_reorg_slot(old_state, old_block_root, new_state, new_block_root, spec) { + Ok(slot) => old_state.slot().saturating_sub(slot), + Err(e) => { + warn!( + log, + "Could not find re-org depth"; + "error" => format!("{:?}", e), + ); + return None; + } + }; + + metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); + metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT_INTEROP); + metrics::set_gauge( + &metrics::FORK_CHOICE_REORG_DISTANCE, + reorg_distance.as_u64() as i64, + ); + warn!( + log, + "Beacon chain re-org"; + "previous_head" => ?old_block_root, + "previous_slot" => old_state.slot(), + "new_head" => ?new_block_root, + "new_slot" => new_state.slot(), + "reorg_distance" => reorg_distance, + ); + + Some(reorg_distance) + } else { + None + } +} + +/// Iterate through the current chain to find the slot intersecting with the given beacon state. +/// The maximum depth this will search is `SLOTS_PER_HISTORICAL_ROOT`, and if that depth is reached +/// and no intersection is found, the finalized slot will be returned. +pub fn find_reorg_slot( + old_state: &BeaconState, + old_block_root: Hash256, + new_state: &BeaconState, + new_block_root: Hash256, + spec: &ChainSpec, +) -> Result { + // The earliest slot for which the two chains may have a common history. + let lowest_slot = std::cmp::min(new_state.slot(), old_state.slot()); + + // Create an iterator across `$state`, assuming that the block at `$state.slot` has the + // block root of `$block_root`. + // + // The iterator will be skipped until the next value returns `lowest_slot`. 
+ // + // This is a macro instead of a function or closure due to the complex types involved + // in all the iterator wrapping. + macro_rules! aligned_roots_iter { + ($state: ident, $block_root: ident) => { + std::iter::once(Ok(($state.slot(), $block_root))) + .chain($state.rev_iter_block_roots(spec)) + .skip_while(|result| { + result + .as_ref() + .map_or(false, |(slot, _)| *slot > lowest_slot) + }) + }; + } + + // Create iterators across old/new roots where iterators both start at the same slot. + let mut new_roots = aligned_roots_iter!(new_state, new_block_root); + let mut old_roots = aligned_roots_iter!(old_state, old_block_root); + + // Whilst *both* of the iterators are still returning values, try and find a common + // ancestor between them. + while let (Some(old), Some(new)) = (old_roots.next(), new_roots.next()) { + let (old_slot, old_root) = old?; + let (new_slot, new_root) = new?; + + // Sanity check to detect programming errors. + if old_slot != new_slot { + return Err(Error::InvalidReorgSlotIter { new_slot, old_slot }); + } + + if old_root == new_root { + // A common ancestor has been found. + return Ok(old_slot); + } + } + + // If no common ancestor is found, declare that the re-org happened at the previous + // finalized slot. + // + // Sometimes this will result in the return slot being *lower* than the actual reorg + // slot. However, assuming we don't re-org through a finalized slot, it will never be + // *higher*. + // + // We provide this potentially-inaccurate-but-safe information to avoid onerous + // database reads during times of deep reorgs.
+ Ok(old_state + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch())) +} + +fn observe_head_block_delays( + block_times_cache: &mut BlockTimesCache, + head_block: &ProtoBlock, + head_block_proposer_index: u64, + head_block_graffiti: String, + slot_clock: &S, + event_handler: Option<&ServerSentEventHandler>, + log: &Logger, +) { + let block_time_set_as_head = timestamp_now(); + let head_block_root = head_block.root; + let head_block_slot = head_block.slot; + let head_block_is_optimistic = head_block.execution_status.is_optimistic_or_invalid(); + + // Calculate the total delay between the start of the slot and when it was set as head. + let block_delay_total = get_slot_delay_ms(block_time_set_as_head, head_block_slot, slot_clock); + + // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to + // the cache during sync. + if block_delay_total < slot_clock.slot_duration() * 64 { + block_times_cache.set_time_set_as_head( + head_block_root, + head_block_slot, + block_time_set_as_head, + ); + } + + // If a block comes in from over 4 slots ago, it is most likely a block from sync. + let block_from_sync = block_delay_total > slot_clock.slot_duration() * 4; + + // Determine whether the block has been set as head too late for proper attestation + // production. + let late_head = block_delay_total >= slot_clock.unagg_attestation_production_delay(); + + // Do not store metrics if the block was > 4 slots old, this helps prevent noise during + // sync. + if !block_from_sync { + // Observe the total block delay. This is the delay between the time the slot started + // and when the block was set as head. + metrics::observe_duration( + &metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME, + block_delay_total, + ); + + // Observe the delay between when we imported the block and when we set the block as + // head. 
+ let block_delays = block_times_cache.get_block_delays( + head_block_root, + slot_clock + .start_of(head_block_slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + metrics::observe_duration( + &metrics::BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME, + block_delays + .observed + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + metrics::observe_duration( + &metrics::BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME, + block_delays + .set_as_head + .unwrap_or_else(|| Duration::from_secs(0)), + ); + + // If the block was enshrined as head too late for attestations to be created for it, + // log a debug warning and increment a metric. + if late_head { + // FIXME(sproul): restore this metric, idk where it went + // metrics::inc_counter(&metrics::BEACON_BLOCK_HEAD_SLOT_START_DELAY_EXCEEDED_TOTAL); + debug!( + log, + "Delayed head block"; + "block_root" => ?head_block_root, + "proposer_index" => head_block_proposer_index, + "slot" => head_block_slot, + "block_delay" => ?block_delay_total, + "observed_delay" => ?block_delays.observed, + "imported_delay" => ?block_delays.imported, + "set_as_head_delay" => ?block_delays.set_as_head, + ); + } + } + + if let Some(event_handler) = event_handler { + if !block_from_sync && late_head && event_handler.has_late_head_subscribers() { + let peer_info = block_times_cache.get_peer_info(head_block_root); + let block_delays = block_times_cache.get_block_delays( + head_block_root, + slot_clock + .start_of(head_block_slot) + .unwrap_or_else(|| Duration::from_secs(0)), + ); + event_handler.register(EventKind::LateHead(SseLateHead { + slot: head_block_slot, + block: head_block_root, + peer_id: peer_info.id, + peer_client: peer_info.client, + proposer_index: head_block_proposer_index, + proposer_graffiti: head_block_graffiti, + block_delay: block_delay_total, + observed_delay: block_delays.observed, + imported_delay: block_delays.imported, + attestable_delay: block_delays.attestable, + set_as_head_delay: block_delays.set_as_head, + 
execution_optimistic: head_block_is_optimistic, + })); + } + } +} diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 36c2f41d9d..5e16a29cf3 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,3 +1,4 @@ +pub use proto_array::CountUnrealizedFull; use serde_derive::{Deserialize, Serialize}; use types::Checkpoint; @@ -24,6 +25,26 @@ pub struct ChainConfig { /// /// If set to 0 then block proposal will not wait for fork choice at all. pub fork_choice_before_proposal_timeout_ms: u64, + /// Number of skip slots in a row before the BN refuses to use connected builders during payload construction. + pub builder_fallback_skips: usize, + /// Number of skip slots in the past `SLOTS_PER_EPOCH` before the BN refuses to use connected + /// builders during payload construction. + pub builder_fallback_skips_per_epoch: usize, + /// Number of epochs since finalization before the BN refuses to use connected builders during + /// payload construction. + pub builder_fallback_epochs_since_finalization: usize, + /// Whether any chain health checks should be considered when deciding whether to use the builder API. + pub builder_fallback_disable_checks: bool, + /// When set to `true`, weigh the "unrealized" FFG progression when choosing a head in fork + /// choice. + pub count_unrealized: bool, + /// When set to `true`, forget any valid/invalid/optimistic statuses in fork choice during start + /// up. + pub always_reset_payload_statuses: bool, + /// Whether to apply paranoid checks to blocks proposed by this beacon node. + pub paranoid_block_proposal: bool, + /// Whether to strictly count unrealized justified votes. 
+ pub count_unrealized_full: CountUnrealizedFull, } impl Default for ChainConfig { @@ -35,6 +56,15 @@ impl Default for ChainConfig { enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, + // Builder fallback configs that are set in `clap` will override these. + builder_fallback_skips: 3, + builder_fallback_skips_per_epoch: 8, + builder_fallback_epochs_since_finalization: 3, + builder_fallback_disable_checks: false, + count_unrealized: true, + always_reset_payload_statuses: false, + paranoid_block_proposal: false, + count_unrealized_full: CountUnrealizedFull::default(), } } } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index f589585f8a..1ddbe13241 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -4,6 +4,7 @@ use crate::{ }; use parking_lot::RwLock; use proto_array::Block as ProtoBlock; +use std::sync::Arc; use types::*; pub struct CacheItem { @@ -18,7 +19,7 @@ pub struct CacheItem { /* * Values used to make the block available. */ - block: SignedBeaconBlock, + block: Arc>, proto_block: ProtoBlock, } @@ -48,7 +49,7 @@ impl EarlyAttesterCache { pub fn add_head_block( &self, beacon_block_root: Hash256, - block: SignedBeaconBlock, + block: Arc>, proto_block: ProtoBlock, state: &BeaconState, spec: &ChainSpec, @@ -85,7 +86,7 @@ impl EarlyAttesterCache { /// /// - There is a cache `item` present. /// - If `request_slot` is in the same epoch as `item.epoch`. - /// - If `request_index` does not exceed `item.comittee_count`. + /// - If `request_index` does not exceed `item.committee_count`. pub fn try_attest( &self, request_slot: Slot, @@ -146,7 +147,7 @@ impl EarlyAttesterCache { } /// Returns the block, if `block_root` matches the cached item. 
- pub fn get_block(&self, block_root: Hash256) -> Option> { + pub fn get_block(&self, block_root: Hash256) -> Option>> { self.item .read() .as_ref() diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 2442852be2..e904a26679 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -45,8 +45,8 @@ pub enum BeaconChainError { UnableToReadSlot, UnableToComputeTimeAtSlot, RevertedFinalizedEpoch { - previous_epoch: Epoch, - new_epoch: Epoch, + old: Checkpoint, + new: Checkpoint, }, SlotClockDidNotStart, NoStateForSlot(Slot), @@ -138,6 +138,7 @@ pub enum BeaconChainError { new_slot: Slot, }, AltairForkDisabled, + BuilderMissing, ExecutionLayerMissing, BlockVariantLacksExecutionPayload(Hash256), ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, execution_layer::Error), @@ -161,6 +162,7 @@ pub enum BeaconChainError { BlockRewardSyncError, HeadMissingFromForkChoice(Hash256), FinalizedBlockMissingFromForkChoice(Hash256), + HeadBlockMissingFromForkChoice(Hash256), InvalidFinalizedPayload { finalized_root: Hash256, execution_block_hash: ExecutionBlockHash, @@ -183,12 +185,23 @@ pub enum BeaconChainError { CannotAttestToFinalizedBlock { beacon_block_root: Hash256, }, + SyncContributionDataReferencesFinalizedBlock { + beacon_block_root: Hash256, + }, RuntimeShutdown, + TokioJoin(tokio::task::JoinError), ProcessInvalidExecutionPayload(JoinError), ForkChoiceSignalOutOfOrder { current: Slot, latest: Slot, }, + ForkchoiceUpdateParamsMissing, + HeadHasInvalidPayload { + block_root: Hash256, + execution_status: ExecutionStatus, + }, + AttestationHeadNotInForkChoice(Hash256), + MissingPersistedForkChoice, } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -214,7 +227,6 @@ easy_from_to!(BlockReplayError, BeaconChainError); #[derive(Debug)] pub enum BlockProductionError { - UnableToGetHeadInfo(BeaconChainError), UnableToGetBlockRootFromState, UnableToReadSlot, 
UnableToProduceAtSlot(Slot), @@ -240,6 +252,11 @@ pub enum BlockProductionError { MissingFinalizedBlock(Hash256), BlockTooLarge(usize), ForkChoiceError(BeaconChainError), + ShuttingDown, + MissingSyncAggregate, + MissingExecutionPayload, + TokioJoin(tokio::task::JoinError), + BeaconChain(BeaconChainError), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 08e4cd41ef..2221d1fc7c 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -7,11 +7,12 @@ //! So, this module contains functions that one might expect to find in other crates, but they live //! here for good reason. +use crate::otb_verification_service::OptimisticTransitionBlock; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::PayloadStatus; +use execution_layer::{BuilderParams, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -21,8 +22,66 @@ use state_processing::per_block_processing::{ partially_verify_execution_payload, }; use std::sync::Arc; +use tokio::task::JoinHandle; +use tree_hash::TreeHash; use types::*; +pub type PreparePayloadResult = Result; +pub type PreparePayloadHandle = JoinHandle>>; + +#[derive(PartialEq)] +pub enum AllowOptimisticImport { + Yes, + No, +} + +/// Used to await the result of executing payload with a remote EE. +pub struct PayloadNotifier { + pub chain: Arc>, + pub block: Arc>, + payload_verification_status: Option, +} + +impl PayloadNotifier { + pub fn new( + chain: Arc>, + block: Arc>, + state: &BeaconState, + ) -> Result> { + let payload_verification_status = if is_execution_enabled(state, block.message().body()) { + // Perform the initial stages of payload verification. 
+ // + // We will duplicate these checks again during `per_block_processing`, however these checks + // are cheap and doing them here ensures we protect the execution engine from junk. + partially_verify_execution_payload( + state, + block.message().execution_payload()?, + &chain.spec, + ) + .map_err(BlockError::PerBlockProcessingError)?; + None + } else { + Some(PayloadVerificationStatus::Irrelevant) + }; + + Ok(Self { + chain, + block, + payload_verification_status, + }) + } + + pub async fn notify_new_payload( + self, + ) -> Result> { + if let Some(precomputed_status) = self.payload_verification_status { + Ok(precomputed_status) + } else { + notify_new_payload(&self.chain, self.block.message()).await + } + } +} + /// Verify that `execution_payload` contained by `block` is considered valid by an execution /// engine. /// @@ -32,31 +91,20 @@ use types::*; /// contains a few extra checks by running `partially_verify_execution_payload` first: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload -pub fn notify_new_payload( +async fn notify_new_payload<'a, T: BeaconChainTypes>( chain: &Arc>, - state: &BeaconState, - block: BeaconBlockRef, + block: BeaconBlockRef<'a, T::EthSpec>, ) -> Result> { - if !is_execution_enabled(state, block.body()) { - return Ok(PayloadVerificationStatus::Irrelevant); - } - let execution_payload = block.execution_payload()?; - // Perform the initial stages of payload verification. - // - // We will duplicate these checks again during `per_block_processing`, however these checks - // are cheap and doing them here ensures we protect the execution payload from junk. 
- partially_verify_execution_payload(state, execution_payload, &chain.spec) - .map_err(BlockError::PerBlockProcessingError)?; - let execution_layer = chain .execution_layer .as_ref() .ok_or(ExecutionPayloadError::NoExecutionConnection)?; - let new_payload_response = execution_layer.block_on(|execution_layer| { - execution_layer.notify_new_payload(&execution_payload.execution_payload) - }); + + let new_payload_response = execution_layer + .notify_new_payload(&execution_payload.execution_payload) + .await; match new_payload_response { Ok(status) => match status { @@ -65,22 +113,55 @@ pub fn notify_new_payload( Ok(PayloadVerificationStatus::Optimistic) } PayloadStatus::Invalid { - latest_valid_hash, .. + latest_valid_hash, + ref validation_error, } => { + debug!( + chain.log, + "Invalid execution payload"; + "validation_error" => ?validation_error, + "latest_valid_hash" => ?latest_valid_hash, + "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "root" => ?block.tree_hash_root(), + "graffiti" => block.body().graffiti().as_utf8_lossy(), + "proposer_index" => block.proposer_index(), + "slot" => block.slot(), + "method" => "new_payload", + ); + + // latest_valid_hash == 0 implies that this was the terminal block + // Hence, we don't need to run `BeaconChain::process_invalid_execution_payload`. + if latest_valid_hash == ExecutionBlockHash::zero() { + return Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()); + } // This block has not yet been applied to fork choice, so the latest block that was // imported to fork choice was the parent. 
let latest_root = block.parent_root(); - chain.process_invalid_execution_payload( - &InvalidationOperation::InvalidateMany { + chain + .process_invalid_execution_payload(&InvalidationOperation::InvalidateMany { head_block_root: latest_root, always_invalidate_head: false, latest_valid_ancestor: latest_valid_hash, - }, - )?; + }) + .await?; Err(ExecutionPayloadError::RejectedByExecutionEngine { status }.into()) } - PayloadStatus::InvalidTerminalBlock { .. } | PayloadStatus::InvalidBlockHash { .. } => { + PayloadStatus::InvalidBlockHash { + ref validation_error, + } => { + debug!( + chain.log, + "Invalid execution payload block hash"; + "validation_error" => ?validation_error, + "execution_block_hash" => ?execution_payload.execution_payload.block_hash, + "root" => ?block.tree_hash_root(), + "graffiti" => block.body().graffiti().as_utf8_lossy(), + "proposer_index" => block.proposer_index(), + "slot" => block.slot(), + "method" => "new_payload", + ); + // Returning an error here should be sufficient to invalidate the block. We have no // information to indicate its parent is invalid, so no need to run // `BeaconChain::process_invalid_execution_payload`. 
@@ -103,9 +184,10 @@ pub fn notify_new_payload( /// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block -pub fn validate_merge_block( - chain: &BeaconChain, - block: BeaconBlockRef, +pub async fn validate_merge_block<'a, T: BeaconChainTypes>( + chain: &Arc>, + block: BeaconBlockRef<'a, T::EthSpec>, + allow_optimistic_import: AllowOptimisticImport, ) -> Result<(), BlockError> { let spec = &chain.spec; let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); @@ -137,9 +219,8 @@ pub fn validate_merge_block( .ok_or(ExecutionPayloadError::NoExecutionConnection)?; let is_valid_terminal_pow_block = execution_layer - .block_on(|execution_layer| { - execution_layer.is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) - }) + .is_valid_terminal_pow_block_hash(execution_payload.parent_hash(), spec) + .await .map_err(ExecutionPayloadError::from)?; match is_valid_terminal_pow_block { @@ -149,29 +230,18 @@ pub fn validate_merge_block( } .into()), None => { - let current_slot = chain - .slot_clock - .now() - .ok_or(BeaconChainError::UnableToReadSlot)?; - - // Ensure the block is a candidate for optimistic import. - if chain - .fork_choice - .read() - .is_optimistic_candidate_block( - current_slot, - block.slot(), - &block.parent_root(), - &chain.spec, - ) - .map_err(BeaconChainError::from)? + if allow_optimistic_import == AllowOptimisticImport::Yes + && is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? 
{ debug!( chain.log, - "Optimistically accepting terminal block"; + "Optimistically importing merge transition block"; "block_hash" => ?execution_payload.parent_hash(), "msg" => "the terminal block/parent was unavailable" ); + // Store Optimistic Transition Block in Database for later Verification + OptimisticTransitionBlock::from_block(block) + .persist_in_store::(&chain.store)?; Ok(()) } else { Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()) @@ -180,6 +250,36 @@ pub fn validate_merge_block( } } +/// Check to see if a block with the given parameters is valid to be imported optimistically. +pub async fn is_optimistic_candidate_block( + chain: &Arc>, + block_slot: Slot, + block_parent_root: Hash256, +) -> Result { + let current_slot = chain.slot()?; + let inner_chain = chain.clone(); + + // Use a blocking task to check if the block is an optimistic candidate. Interacting + // with the `fork_choice` lock in an async task can block the core executor. + chain + .spawn_blocking_handle( + move || { + inner_chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_candidate_block( + current_slot, + block_slot, + &block_parent_root, + &inner_chain.spec, + ) + }, + "validate_merge_block_optimistic_candidate", + ) + .await? 
+ .map_err(BeaconChainError::from) +} + /// Validate the gossip block's execution_payload according to the checks described here: /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block pub fn validate_execution_payload_for_gossip( @@ -243,33 +343,48 @@ pub fn validate_execution_payload_for_gossip( /// Equivalent to the `get_execution_payload` function in the Validator Guide: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal -pub fn get_execution_payload>( - chain: &BeaconChain, +pub fn get_execution_payload< + T: BeaconChainTypes, + Payload: ExecPayload + Default + Send + 'static, +>( + chain: Arc>, state: &BeaconState, proposer_index: u64, -) -> Result { - Ok( - prepare_execution_payload_blocking::(chain, state, proposer_index)? - .unwrap_or_default(), - ) -} + builder_params: BuilderParams, +) -> Result, BlockProductionError> { + // Compute all required values from the `state` now to avoid needing to pass it into a spawned + // task. + let spec = &chain.spec; + let current_epoch = state.current_epoch(); + let is_merge_transition_complete = is_merge_transition_complete(state); + let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; + let random = *state.get_randao_mix(current_epoch)?; + let latest_execution_payload_header_block_hash = + state.latest_execution_payload_header()?.block_hash; -/// Wraps the async `prepare_execution_payload` function as a blocking task. -pub fn prepare_execution_payload_blocking>( - chain: &BeaconChain, - state: &BeaconState, - proposer_index: u64, -) -> Result, BlockProductionError> { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BlockProductionError::ExecutionLayerMissing)?; + // Spawn a task to obtain the execution payload from the EL via a series of async calls. The + // `join_handle` can be used to await the result of the function. 
+ let join_handle = chain + .task_executor + .clone() + .spawn_handle( + async move { + prepare_execution_payload::( + &chain, + is_merge_transition_complete, + timestamp, + random, + proposer_index, + latest_execution_payload_header_block_hash, + builder_params, + ) + .await + }, + "get_execution_payload", + ) + .ok_or(BlockProductionError::ShuttingDown)?; - execution_layer - .block_on_generic(|_| async { - prepare_execution_payload::(chain, state, proposer_index).await - }) - .map_err(BlockProductionError::BlockingFailed)? + Ok(join_handle) } /// Prepares an execution payload for inclusion in a block. @@ -286,74 +401,87 @@ pub fn prepare_execution_payload_blocking>( - chain: &BeaconChain, - state: &BeaconState, +#[allow(clippy::too_many_arguments)] +pub async fn prepare_execution_payload( + chain: &Arc>, + is_merge_transition_complete: bool, + timestamp: u64, + random: Hash256, proposer_index: u64, -) -> Result, BlockProductionError> { + latest_execution_payload_header_block_hash: ExecutionBlockHash, + builder_params: BuilderParams, +) -> Result +where + T: BeaconChainTypes, + Payload: ExecPayload + Default, +{ + let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; let execution_layer = chain .execution_layer .as_ref() .ok_or(BlockProductionError::ExecutionLayerMissing)?; - let parent_hash = if !is_merge_transition_complete(state) { + let parent_hash = if !is_merge_transition_complete { let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero(); let is_activation_epoch_reached = - state.current_epoch() >= spec.terminal_block_hash_activation_epoch; + current_epoch >= spec.terminal_block_hash_activation_epoch; if is_terminal_block_hash_set && !is_activation_epoch_reached { - return Ok(None); + // Use the "empty" payload if there's a terminal block hash, but we haven't reached the + // terminal block epoch yet. 
+ return Ok(<_>::default()); } let terminal_pow_block_hash = execution_layer - .get_terminal_pow_block_hash(spec) + .get_terminal_pow_block_hash(spec, timestamp) .await .map_err(BlockProductionError::TerminalPoWBlockLookupFailed)?; if let Some(terminal_pow_block_hash) = terminal_pow_block_hash { terminal_pow_block_hash } else { - return Ok(None); + // If the merge transition hasn't occurred yet and the EL hasn't found the terminal + // block, return an "empty" payload. + return Ok(<_>::default()); } } else { - state.latest_execution_payload_header()?.block_hash + latest_execution_payload_header_block_hash }; - let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; - let random = *state.get_randao_mix(state.current_epoch())?; - let finalized_root = state.finalized_checkpoint().root; - - // The finalized block hash is not included in the specification, however we provide this - // parameter so that the execution layer can produce a payload id if one is not already known - // (e.g., due to a recent reorg). - let finalized_block_hash = - if let Some(block) = chain.fork_choice.read().get_block(&finalized_root) { - block.execution_status.block_hash() - } else { - chain - .store - .get_blinded_block(&finalized_root) - .map_err(BlockProductionError::FailedToReadFinalizedBlock)? - .ok_or(BlockProductionError::MissingFinalizedBlock(finalized_root))? - .message() - .body() - .execution_payload() - .ok() - .map(|ep| ep.block_hash()) - }; + // Try to obtain the fork choice update parameters from the cached head. + // + // Use a blocking task to interact with the `canonical_head` lock otherwise we risk blocking the + // core `tokio` executor. 
+ let inner_chain = chain.clone(); + let forkchoice_update_params = chain + .spawn_blocking_handle( + move || { + inner_chain + .canonical_head + .cached_head() + .forkchoice_update_parameters() + }, + "prepare_execution_payload_forkchoice_update_params", + ) + .await + .map_err(BlockProductionError::BeaconChain)?; // Note: the suggested_fee_recipient is stored in the `execution_layer`, it will add this parameter. + // + // This future is not executed here, it's up to the caller to await it. let execution_payload = execution_layer - .get_payload::( + .get_payload::( parent_hash, timestamp, random, - finalized_block_hash.unwrap_or_else(ExecutionBlockHash::zero), proposer_index, + forkchoice_update_params, + builder_params, + &chain.spec, ) .await .map_err(BlockProductionError::GetPayloadFailed)?; - Ok(Some(execution_payload)) + Ok(execution_payload) } diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index fd452c33f8..3d48dfd8f6 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,6 +1,7 @@ use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::{ForkChoice, PayloadVerificationStatus}; +use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus}; use itertools::process_results; +use proto_array::CountUnrealizedFull; use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ @@ -98,7 +99,10 @@ pub fn reset_fork_choice_to_finalization, Cold: It head_block_root: Hash256, head_state: &BeaconState, store: Arc>, + current_slot: Option, spec: &ChainSpec, + count_unrealized_config: CountUnrealized, + count_unrealized_full_config: CountUnrealizedFull, ) -> Result, E>, String> { // Fetch finalized block. 
let finalized_checkpoint = head_state.finalized_checkpoint(); @@ -139,7 +143,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It })?; let finalized_snapshot = BeaconSnapshot { beacon_block_root: finalized_block_root, - beacon_block: finalized_block, + beacon_block: Arc::new(finalized_block), beacon_state: finalized_state, }; @@ -150,6 +154,9 @@ pub fn reset_fork_choice_to_finalization, Cold: It finalized_block_root, &finalized_snapshot.beacon_block, &finalized_snapshot.beacon_state, + current_slot, + count_unrealized_full_config, + spec, ) .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; @@ -161,7 +168,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; let mut state = finalized_snapshot.beacon_state; - for block in blocks { + let blocks_len = blocks.len(); + for (i, block) in blocks.into_iter().enumerate() { complete_state_advance(&mut state, None, block.slot(), spec) .map_err(|e| format!("State advance failed: {:?}", e))?; @@ -183,17 +191,26 @@ pub fn reset_fork_choice_to_finalization, Cold: It // This scenario is so rare that it seems OK to double-verify some blocks. let payload_verification_status = PayloadVerificationStatus::Optimistic; - let (block, _) = block.deconstruct(); + // Because we are replaying a single chain of blocks, we only need to calculate unrealized + // justification for the last block in the chain. + let is_last_block = i + 1 == blocks_len; + let count_unrealized = if is_last_block { + count_unrealized_config + } else { + CountUnrealized::False + }; + fork_choice .on_block( block.slot(), - &block, + block.message(), block.canonical_root(), // Reward proposer boost. We are reinforcing the canonical chain. 
Duration::from_secs(0), &state, payload_verification_status, spec, + count_unrealized, ) .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; } diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1891362ebb..cc45a6bb9a 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -7,6 +7,7 @@ use state_processing::{ }; use std::borrow::Cow; use std::iter; +use std::sync::Arc; use std::time::Duration; use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore}; use types::{Hash256, SignedBlindedBeaconBlock, Slot}; @@ -58,7 +59,7 @@ impl BeaconChain { /// Return the number of blocks successfully imported. pub fn import_historical_block_batch( &self, - blocks: Vec>, + blocks: Vec>>, ) -> Result { let anchor_info = self .store diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 644bce65b8..06036c93b4 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -9,16 +9,18 @@ pub mod block_reward; mod block_times_cache; mod block_verification; pub mod builder; +pub mod canonical_head; pub mod chain_config; mod early_attester_cache; mod errors; pub mod eth1_chain; pub mod events; -mod execution_payload; +pub mod execution_payload; pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; +pub mod merge_readiness; mod metrics; pub mod migrate; mod naive_aggregation_pool; @@ -26,6 +28,7 @@ mod observed_aggregates; mod observed_attesters; mod observed_block_producers; pub mod observed_operations; +pub mod otb_verification_service; mod persisted_beacon_chain; mod persisted_fork_choice; mod pre_finalization_cache; @@ -37,22 +40,26 @@ pub mod sync_committee_verification; pub mod test_utils; mod timeout_rw_lock; pub mod validator_monitor; -mod validator_pubkey_cache; +pub mod 
validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, - ForkChoiceError, HeadInfo, HeadSafetyStatus, ProduceBlockVerification, StateSkipConfig, - WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + CountUnrealized, ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, }; pub use self::beacon_snapshot::BeaconSnapshot; -pub use self::chain_config::ChainConfig; +pub use self::chain_config::{ChainConfig, CountUnrealizedFull}; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use self::historical_blocks::HistoricalBlockError; pub use attestation_verification::Error as AttestationError; pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError}; pub use block_verification::{BlockError, ExecutionPayloadError, GossipVerifiedBlock}; +pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; +pub use execution_layer::EngineState; +pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters}; pub use metrics::scrape_for_metrics; pub use parking_lot; pub use slot_clock; diff --git a/beacon_node/beacon_chain/src/merge_readiness.rs b/beacon_node/beacon_chain/src/merge_readiness.rs new file mode 100644 index 0000000000..4ef2102fd5 --- /dev/null +++ b/beacon_node/beacon_chain/src/merge_readiness.rs @@ -0,0 +1,192 @@ +//! Provides tools for checking if a node is ready for the Bellatrix upgrade and following merge +//! transition. 
+ +use crate::{BeaconChain, BeaconChainTypes}; +use serde::{Deserialize, Serialize, Serializer}; +use std::fmt; +use std::fmt::Write; +use types::*; + +/// The time before the Bellatrix fork when we will start issuing warnings about preparation. +const SECONDS_IN_A_WEEK: u64 = 604800; +pub const MERGE_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; + +#[derive(Default, Debug, Serialize, Deserialize)] +pub struct MergeConfig { + #[serde(serialize_with = "serialize_uint256")] + pub terminal_total_difficulty: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub terminal_block_hash: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub terminal_block_hash_epoch: Option, +} + +impl fmt::Display for MergeConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.terminal_block_hash.is_none() + && self.terminal_block_hash_epoch.is_none() + && self.terminal_total_difficulty.is_none() + { + return write!( + f, + "Merge terminal difficulty parameters not configured, check your config" + ); + } + let mut display_string = String::new(); + if let Some(terminal_total_difficulty) = self.terminal_total_difficulty { + write!( + display_string, + "terminal_total_difficulty: {},", + terminal_total_difficulty + )?; + } + if let Some(terminal_block_hash) = self.terminal_block_hash { + write!( + display_string, + "terminal_block_hash: {},", + terminal_block_hash + )?; + } + if let Some(terminal_block_hash_epoch) = self.terminal_block_hash_epoch { + write!( + display_string, + "terminal_block_hash_epoch: {},", + terminal_block_hash_epoch + )?; + } + write!(f, "{}", display_string.trim_end_matches(','))?; + Ok(()) + } +} +impl MergeConfig { + /// Instantiate `self` from the values in a `ChainSpec`. 
+ pub fn from_chainspec(spec: &ChainSpec) -> Self { + let mut params = MergeConfig::default(); + if spec.terminal_total_difficulty != Uint256::max_value() { + params.terminal_total_difficulty = Some(spec.terminal_total_difficulty); + } + if spec.terminal_block_hash != ExecutionBlockHash::zero() { + params.terminal_block_hash = Some(spec.terminal_block_hash); + } + if spec.terminal_block_hash_activation_epoch != Epoch::max_value() { + params.terminal_block_hash_epoch = Some(spec.terminal_block_hash_activation_epoch); + } + params + } +} + +/// Indicates if a node is ready for the Bellatrix upgrade and subsequent merge transition. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] +pub enum MergeReadiness { + /// The node is ready, as far as we can tell. + Ready { + config: MergeConfig, + #[serde(serialize_with = "serialize_uint256")] + current_difficulty: Option, + }, + /// The transition configuration with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration. + ExchangeTransitionConfigurationFailed { error: String }, + /// The EL can be reached and has the correct configuration, however it's not yet synced. + NotSynced, + /// The user has not configured this node to use an execution endpoint. + NoExecutionEndpoint, +} + +impl fmt::Display for MergeReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MergeReadiness::Ready { + config: params, + current_difficulty, + } => { + write!( + f, + "This node appears ready for the merge. 
\ + Params: {}, current_difficulty: {:?}", + params, current_difficulty + ) + } + MergeReadiness::ExchangeTransitionConfigurationFailed { error } => write!( + f, + "Could not confirm the transition configuration with the \ + execution endpoint: {:?}", + error + ), + MergeReadiness::NotSynced => write!( + f, + "The execution endpoint is connected and configured, \ + however it is not yet synced" + ), + MergeReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement for the merge" + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if user has an EL configured, or if the Bellatrix fork has occurred or will + /// occur within `MERGE_READINESS_PREPARATION_SECONDS`. + pub fn is_time_to_prepare_for_bellatrix(&self, current_slot: Slot) -> bool { + if let Some(bellatrix_epoch) = self.spec.bellatrix_fork_epoch { + let bellatrix_slot = bellatrix_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let merge_readiness_preparation_slots = + MERGE_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + + if self.execution_layer.is_some() { + // The user has already configured an execution layer, start checking for readiness + // right away. + true + } else { + // Return `true` if Bellatrix has happened or is within the preparation time. + current_slot + merge_readiness_preparation_slots > bellatrix_slot + } + } else { + // The Bellatrix fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for the merge. + pub async fn check_merge_readiness(&self) -> MergeReadiness { + if let Some(el) = self.execution_layer.as_ref() { + if let Err(e) = el.exchange_transition_configuration(&self.spec).await { + // The EL was either unreachable, responded with an error or has a different + // configuration. 
+ return MergeReadiness::ExchangeTransitionConfigurationFailed { + error: format!("{:?}", e), + }; + } + + if !el.is_synced_for_notifier().await { + // The EL is not synced. + return MergeReadiness::NotSynced; + } + let params = MergeConfig::from_chainspec(&self.spec); + let current_difficulty = el.get_current_difficulty().await.ok(); + MergeReadiness::Ready { + config: params, + current_difficulty, + } + } else { + // There is no EL configured. + MergeReadiness::NoExecutionEndpoint + } + } +} + +/// Utility function to serialize a Uint256 as a decimal string. +fn serialize_uint256(val: &Option, s: S) -> Result +where + S: Serializer, +{ + match val { + Some(v) => v.to_string().serialize(s), + None => s.serialize_none(), + } +} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index a1e4a84239..e1e48efacb 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -118,14 +118,17 @@ lazy_static! { /* * Block Statistics */ - pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = try_create_histogram( + pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = try_create_histogram_with_buckets( "beacon_operations_per_block_attestation_total", - "Number of attestations in a block" + "Number of attestations in a block", + // Full block is 128. + Ok(vec![0_f64, 1_f64, 3_f64, 15_f64, 31_f64, 63_f64, 127_f64, 255_f64]) ); - pub static ref BLOCK_SIZE: Result = try_create_histogram( + pub static ref BLOCK_SIZE: Result = try_create_histogram_with_buckets( "beacon_block_total_size", - "Size of a signed beacon block" + "Size of a signed beacon block", + linear_buckets(5120_f64,5120_f64,10) ); /* @@ -298,6 +301,10 @@ lazy_static! 
{ "beacon_fork_choice_reorg_total", "Count of occasions fork choice has switched to a different chain" ); + pub static ref FORK_CHOICE_REORG_DISTANCE: Result = try_create_int_gauge( + "beacon_fork_choice_reorg_distance", + "The distance of each re-org of the fork choice algorithm" + ); pub static ref FORK_CHOICE_REORG_COUNT_INTEROP: Result = try_create_int_counter( "beacon_reorgs_total", "Count of occasions fork choice has switched to a different chain" @@ -321,7 +328,7 @@ lazy_static! { pub static ref BALANCES_CACHE_HITS: Result = try_create_int_counter("beacon_balances_cache_hits_total", "Count of times balances cache fulfils request"); pub static ref BALANCES_CACHE_MISSES: Result = - try_create_int_counter("beacon_balances_cache_misses_total", "Count of times balances cache fulfils request"); + try_create_int_counter("beacon_balances_cache_misses_total", "Count of times balances cache misses request"); /* * Persisting BeaconChain components to disk @@ -774,25 +781,33 @@ lazy_static! { /* * Block Delay Metrics */ - pub static ref BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME: Result = try_create_histogram( + pub static ref BEACON_BLOCK_OBSERVED_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( "beacon_block_observed_slot_start_delay_time", "Duration between the start of the block's slot and the time the block was observed.", + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + decimal_buckets(-1,2) ); - pub static ref BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME: Result = try_create_histogram( + pub static ref BEACON_BLOCK_IMPORTED_OBSERVED_DELAY_TIME: Result = try_create_histogram_with_buckets( "beacon_block_imported_observed_delay_time", "Duration between the time the block was observed and the time when it was imported.", + // [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5] + decimal_buckets(-2,0) ); - pub static ref BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME: Result = try_create_histogram( + pub static ref BEACON_BLOCK_HEAD_IMPORTED_DELAY_TIME: Result = 
try_create_histogram_with_buckets( "beacon_block_head_imported_delay_time", "Duration between the time the block was imported and the time when it was set as head.", + // [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5] + decimal_buckets(-2,-1) ); pub static ref BEACON_BLOCK_HEAD_ATTESTABLE_DELAY_TIME: Result = try_create_histogram( "beacon_block_head_attestable_delay_time", "Duration between the start of the slot and the time at which the block could be attested to.", ); - pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME: Result = try_create_histogram( + pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( "beacon_block_head_slot_start_delay_time", "Duration between the start of the block's slot and the time when it was set as head.", + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + decimal_buckets(-1,2) ); pub static ref BEACON_BLOCK_HEAD_MISSED_ATT_DEADLINE_LATE: Result = try_create_int_counter( "beacon_block_head_missed_att_deadline_late", diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 84a0b1e8dd..64d3bd3c8b 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -55,7 +55,13 @@ pub enum PruningOutcome { Successful { old_finalized_checkpoint: Checkpoint, }, - DeferredConcurrentMutation, + /// The run was aborted because the new finalized checkpoint is older than the previous one. + OutOfOrderFinalization { + old_finalized_checkpoint: Checkpoint, + new_finalized_checkpoint: Checkpoint, + }, + /// The run was aborted due to a concurrent mutation of the head tracker. + DeferredConcurrentHeadTrackerMutation, } /// Logic errors that can occur during pruning, none of these should ever happen. 
@@ -68,6 +74,10 @@ pub enum PruningError { MissingInfoForCanonicalChain { slot: Slot, }, + FinalizedStateOutOfOrder { + old_finalized_checkpoint: Checkpoint, + new_finalized_checkpoint: Checkpoint, + }, UnexpectedEqualStateRoots, UnexpectedUnequalStateRoots, } @@ -224,7 +234,7 @@ impl, Cold: ItemStore> BackgroundMigrator old_finalized_checkpoint, - Ok(PruningOutcome::DeferredConcurrentMutation) => { + Ok(PruningOutcome::DeferredConcurrentHeadTrackerMutation) => { warn!( log, "Pruning deferred because of a concurrent mutation"; @@ -232,8 +242,21 @@ impl, Cold: ItemStore> BackgroundMigrator { + warn!( + log, + "Ignoring out of order finalization request"; + "old_finalized_epoch" => old_finalized_checkpoint.epoch, + "new_finalized_epoch" => new_finalized_checkpoint.epoch, + "message" => "this is expected occasionally due to a (harmless) race condition" + ); + return; + } Err(e) => { - warn!(log, "Block pruning failed"; "error" => format!("{:?}", e)); + warn!(log, "Block pruning failed"; "error" => ?e); return; } }; @@ -353,6 +376,16 @@ impl, Cold: ItemStore> BackgroundMigrator new_finalized_slot { + return Ok(PruningOutcome::OutOfOrderFinalization { + old_finalized_checkpoint, + new_finalized_checkpoint, + }); + } + debug!( log, "Starting database pruning"; @@ -505,7 +538,7 @@ impl, Cold: ItemStore> BackgroundMigrator, E: EthSpec> { /// previously seen attester slashings, i.e. those validators in the intersection of /// `attestation_1.attester_indices` and `attestation_2.attester_indices`. observed_validator_indices: HashSet, + /// The name of the current fork. The default will be overwritten on first use. + #[derivative(Default(value = "ForkName::Base"))] + current_fork: ForkName, _phantom: PhantomData<(T, E)>, } /// Was the observed operation new and valid for further processing, or a useless duplicate? 
#[derive(Debug, PartialEq, Eq, Clone)] -pub enum ObservationOutcome { - New(SigVerifiedOp), +pub enum ObservationOutcome { + New(SigVerifiedOp), AlreadyKnown, } @@ -81,7 +86,9 @@ impl, E: EthSpec> ObservedOperations { op: T, head_state: &BeaconState, spec: &ChainSpec, - ) -> Result, T::Error> { + ) -> Result, T::Error> { + self.reset_at_fork_boundary(head_state.slot(), spec); + let observed_validator_indices = &mut self.observed_validator_indices; let new_validator_indices = op.observed_validators(); @@ -107,4 +114,23 @@ impl, E: EthSpec> ObservedOperations { Ok(ObservationOutcome::New(verified_op)) } + + /// Reset the cache when crossing a fork boundary. + /// + /// This prevents an attacker from crafting a self-slashing which is only valid before the fork + /// (e.g. using the Altair fork domain at a Bellatrix epoch), in order to prevent propagation of + /// all other slashings due to the duplicate check. + /// + /// It doesn't matter if this cache gets reset too often, as we reset it on restart anyway and a + /// false negative just results in propagation of messages which should have been ignored. + /// + /// In future we could check slashing relevance against the op pool itself, but that would + /// require indexing the attester slashings in the op pool by validator index. 
+ fn reset_at_fork_boundary(&mut self, head_slot: Slot, spec: &ChainSpec) { + let head_fork = spec.fork_name_at_slot::(head_slot); + if head_fork != self.current_fork { + self.observed_validator_indices.clear(); + self.current_fork = head_fork; + } + } } diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs new file mode 100644 index 0000000000..97f6f8c3e0 --- /dev/null +++ b/beacon_node/beacon_chain/src/otb_verification_service.rs @@ -0,0 +1,378 @@ +use crate::execution_payload::{validate_merge_block, AllowOptimisticImport}; +use crate::{ + BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, +}; +use itertools::process_results; +use proto_array::InvalidationOperation; +use slog::{crit, debug, error, info, warn}; +use slot_clock::SlotClock; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use state_processing::per_block_processing::is_merge_transition_complete; +use std::sync::Arc; +use store::{DBColumn, Error as StoreError, HotColdDB, KeyValueStore, StoreItem}; +use task_executor::{ShutdownReason, TaskExecutor}; +use tokio::time::sleep; +use tree_hash::TreeHash; +use types::{BeaconBlockRef, EthSpec, Hash256, Slot}; +use DBColumn::OptimisticTransitionBlock as OTBColumn; + +#[derive(Clone, Debug, Decode, Encode, PartialEq)] +pub struct OptimisticTransitionBlock { + root: Hash256, + slot: Slot, +} + +impl OptimisticTransitionBlock { + // types::BeaconBlockRef<'_, ::EthSpec> + pub fn from_block(block: BeaconBlockRef) -> Self { + Self { + root: block.tree_hash_root(), + slot: block.slot(), + } + } + + pub fn root(&self) -> &Hash256 { + &self.root + } + + pub fn slot(&self) -> &Slot { + &self.slot + } + + pub fn persist_in_store(&self, store: A) -> Result<(), StoreError> + where + T: BeaconChainTypes, + A: AsRef>, + { + if store + .as_ref() + .item_exists::(&self.root)? 
+ { + Ok(()) + } else { + store.as_ref().put_item(&self.root, self) + } + } + + pub fn remove_from_store(&self, store: A) -> Result<(), StoreError> + where + T: BeaconChainTypes, + A: AsRef>, + { + store + .as_ref() + .hot_db + .key_delete(OTBColumn.into(), self.root.as_bytes()) + } + + fn is_canonical( + &self, + chain: &BeaconChain, + ) -> Result { + Ok(chain + .forwards_iter_block_roots_until(self.slot, self.slot)? + .next() + .transpose()? + .map(|(root, _)| root) + == Some(self.root)) + } +} + +impl StoreItem for OptimisticTransitionBlock { + fn db_column() -> DBColumn { + OTBColumn + } + + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } +} + +/// The routine is expected to run once per epoch, 1/4th through the epoch. +pub const EPOCH_DELAY_FACTOR: u32 = 4; + +/// Spawns a routine which checks the validity of any optimistically imported transition blocks +/// +/// This routine will run once per epoch, at `epoch_duration / EPOCH_DELAY_FACTOR` after +/// the start of each epoch. +/// +/// The service will not be started if there is no `execution_layer` on the `chain`. +pub fn start_otb_verification_service( + executor: TaskExecutor, + chain: Arc>, +) { + // Avoid spawning the service if there's no EL, it'll just error anyway. + if chain.execution_layer.is_some() { + executor.spawn( + async move { otb_verification_service(chain).await }, + "otb_verification_service", + ); + } +} + +pub fn load_optimistic_transition_blocks( + chain: &BeaconChain, +) -> Result, StoreError> { + process_results(chain.store.hot_db.iter_column(OTBColumn), |iter| { + iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) + .collect() + })? 
+} + +#[derive(Debug)] +pub enum Error { + ForkChoice(String), + BeaconChain(BeaconChainError), + StoreError(StoreError), + NoBlockFound(OptimisticTransitionBlock), +} + +pub async fn validate_optimistic_transition_blocks( + chain: &Arc>, + otbs: Vec, +) -> Result<(), Error> { + let finalized_slot = chain + .canonical_head + .fork_choice_read_lock() + .get_finalized_block() + .map_err(|e| Error::ForkChoice(format!("{:?}", e)))? + .slot; + + // separate otbs into + // non-canonical + // finalized canonical + // unfinalized canonical + let mut non_canonical_otbs = vec![]; + let (finalized_canonical_otbs, unfinalized_canonical_otbs) = process_results( + otbs.into_iter().map(|otb| { + otb.is_canonical(chain) + .map(|is_canonical| (otb, is_canonical)) + }), + |pair_iter| { + pair_iter + .filter_map(|(otb, is_canonical)| { + if is_canonical { + Some(otb) + } else { + non_canonical_otbs.push(otb); + None + } + }) + .partition::, _>(|otb| *otb.slot() <= finalized_slot) + }, + ) + .map_err(Error::BeaconChain)?; + + // remove non-canonical blocks that conflict with finalized checkpoint from the database + for otb in non_canonical_otbs { + if *otb.slot() <= finalized_slot { + otb.remove_from_store::(&chain.store) + .map_err(Error::StoreError)?; + } + } + + // ensure finalized canonical otb are valid, otherwise kill client + for otb in finalized_canonical_otbs { + match chain.get_block(otb.root()).await { + Ok(Some(block)) => { + match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await + { + Ok(()) => { + // merge transition block is valid, remove it from OTB + otb.remove_from_store::(&chain.store) + .map_err(Error::StoreError)?; + info!( + chain.log, + "Validated merge transition block"; + "block_root" => ?otb.root(), + "type" => "finalized" + ); + } + // The block was not able to be verified by the EL. Leave the OTB in the + // database since the EL is likely still syncing and may verify the block + // later. 
+ Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::UnverifiedNonOptimisticCandidate, + )) => (), + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, + )) => { + // Finalized Merge Transition Block is Invalid! Kill the Client! + crit!( + chain.log, + "Finalized merge transition block is invalid!"; + "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \ + You may be on a hostile network.", + "block_hash" => ?block.canonical_root() + ); + let mut shutdown_sender = chain.shutdown_sender(); + if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + )) { + crit!( + chain.log, + "Failed to shut down client"; + "error" => ?e, + "shutdown_reason" => INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON + ); + } + } + _ => {} + } + } + Ok(None) => return Err(Error::NoBlockFound(otb)), + // Our database has pruned the payload and the payload was unavailable on the EL since + // the EL is still syncing or the payload is non-canonical. + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), + Err(e) => return Err(Error::BeaconChain(e)), + } + } + + // attempt to validate any non-finalized canonical otb blocks + for otb in unfinalized_canonical_otbs { + match chain.get_block(otb.root()).await { + Ok(Some(block)) => { + match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await + { + Ok(()) => { + // merge transition block is valid, remove it from OTB + otb.remove_from_store::(&chain.store) + .map_err(Error::StoreError)?; + info!( + chain.log, + "Validated merge transition block"; + "block_root" => ?otb.root(), + "type" => "not finalized" + ); + } + // The block was not able to be verified by the EL. Leave the OTB in the + // database since the EL is likely still syncing and may verify the block + // later. 
+ Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::UnverifiedNonOptimisticCandidate, + )) => (), + Err(BlockError::ExecutionPayloadError( + ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, + )) => { + // Unfinalized Merge Transition Block is Invalid -> Run process_invalid_execution_payload + warn!( + chain.log, + "Merge transition block invalid"; + "block_root" => ?otb.root() + ); + chain + .process_invalid_execution_payload( + &InvalidationOperation::InvalidateOne { + block_root: *otb.root(), + }, + ) + .await + .map_err(|e| { + warn!( + chain.log, + "Error checking merge transition block"; + "error" => ?e, + "location" => "process_invalid_execution_payload" + ); + Error::BeaconChain(e) + })?; + } + _ => {} + } + } + Ok(None) => return Err(Error::NoBlockFound(otb)), + // Our database has pruned the payload and the payload was unavailable on the EL since + // the EL is still syncing or the payload is non-canonical. + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), + Err(e) => return Err(Error::BeaconChain(e)), + } + } + + Ok(()) +} + +/// Loop until any optimistically imported merge transition blocks have been verified and +/// the merge has been finalized. +async fn otb_verification_service(chain: Arc>) { + let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; + loop { + match chain + .slot_clock + .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) + { + Some(duration) => { + let additional_delay = epoch_duration / EPOCH_DELAY_FACTOR; + sleep(duration + additional_delay).await; + + debug!( + chain.log, + "OTB verification service firing"; + ); + + if !is_merge_transition_complete( + &chain.canonical_head.cached_head().snapshot.beacon_state, + ) { + // We are pre-merge. Nothing to do yet. 
+ continue; + } + + // load all optimistically imported transition blocks from the database + match load_optimistic_transition_blocks(chain.as_ref()) { + Ok(otbs) => { + if otbs.is_empty() { + if chain + .canonical_head + .fork_choice_read_lock() + .get_finalized_block() + .map_or(false, |block| { + block.execution_status.is_execution_enabled() + }) + { + // there are no optimistic blocks in the database, we can exit + // the service since the merge transition is finalized and we'll + // never see another transition block + break; + } else { + debug!( + chain.log, + "No optimistic transition blocks"; + "info" => "waiting for the merge transition to finalize" + ) + } + } + if let Err(e) = validate_optimistic_transition_blocks(&chain, otbs).await { + warn!( + chain.log, + "Error while validating optimistic transition blocks"; + "error" => ?e + ); + } + } + Err(e) => { + error!( + chain.log, + "Error loading optimistic transition blocks"; + "error" => ?e + ); + } + }; + } + None => { + error!(chain.log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. 
+ sleep(chain.slot_clock.slot_duration()).await; + } + }; + } + debug!( + chain.log, + "No optimistic transition blocks in database"; + "msg" => "shutting down OTB verification service" + ); +} diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index 3935c6214c..a0a7f1f65e 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,5 +1,6 @@ use crate::beacon_fork_choice_store::{ - PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, + PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11, + PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, }; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -7,10 +8,10 @@ use store::{DBColumn, Error, StoreItem}; use superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. -pub type PersistedForkChoice = PersistedForkChoiceV8; +pub type PersistedForkChoice = PersistedForkChoiceV11; #[superstruct( - variants(V1, V7, V8), + variants(V1, V7, V8, V10, V11), variant_attributes(derive(Encode, Decode)), no_enum )] @@ -22,6 +23,10 @@ pub struct PersistedForkChoice { pub fork_choice_store: PersistedForkChoiceStoreV7, #[superstruct(only(V8))] pub fork_choice_store: PersistedForkChoiceStoreV8, + #[superstruct(only(V10))] + pub fork_choice_store: PersistedForkChoiceStoreV10, + #[superstruct(only(V11))] + pub fork_choice_store: PersistedForkChoiceStoreV11, } macro_rules! impl_store_item { @@ -45,3 +50,5 @@ macro_rules! 
impl_store_item { impl_store_item!(PersistedForkChoiceV1); impl_store_item!(PersistedForkChoiceV7); impl_store_item!(PersistedForkChoiceV8); +impl_store_item!(PersistedForkChoiceV10); +impl_store_item!(PersistedForkChoiceV11); diff --git a/beacon_node/beacon_chain/src/proposer_prep_service.rs b/beacon_node/beacon_chain/src/proposer_prep_service.rs index 18abbc8c5b..9cd177b340 100644 --- a/beacon_node/beacon_chain/src/proposer_prep_service.rs +++ b/beacon_node/beacon_chain/src/proposer_prep_service.rs @@ -51,9 +51,7 @@ async fn proposer_prep_service( executor.spawn( async move { if let Ok(current_slot) = inner_chain.slot() { - if let Err(e) = inner_chain - .prepare_beacon_proposer_async(current_slot) - .await + if let Err(e) = inner_chain.prepare_beacon_proposer(current_slot).await { error!( inner_chain.log, diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 5a8d7e1c52..23792ede58 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,4 +1,7 @@ //! Utilities for managing database schema changes. +mod migration_schema_v10; +mod migration_schema_v11; +mod migration_schema_v12; mod migration_schema_v20; mod migration_schema_v6; mod migration_schema_v7; @@ -7,7 +10,11 @@ mod migration_schema_v9; mod types; use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; -use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; +use crate::persisted_fork_choice::{ + PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7, + PersistedForkChoiceV8, +}; +use crate::types::ChainSpec; use slog::{warn, Logger}; use std::path::Path; use std::sync::Arc; @@ -22,23 +29,30 @@ pub fn migrate_schema( from: SchemaVersion, to: SchemaVersion, log: Logger, + spec: &ChainSpec, ) -> Result<(), StoreError> { match (from, to) { // Migrating from the current schema version to iself is always OK, a no-op. 
(_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), // Upgrade for tree-states database changes. - (SchemaVersion(9), SchemaVersion(20)) => migration_schema_v20::upgrade_to_v20::(db, log), - // FIXME(sproul): this is a temporary patch remove it before merging - (SchemaVersion(10), SchemaVersion(20)) => db.store_schema_version(to), + (SchemaVersion(12), SchemaVersion(20)) => { + migration_schema_v20::upgrade_to_v20::(db, log) + } // Downgrade for tree-states database changes. - (SchemaVersion(20), SchemaVersion(9)) => { + (SchemaVersion(20), SchemaVersion(12)) => { migration_schema_v20::downgrade_from_v20::(db, log) } // Upgrade across multiple versions by recursively migrating one step at a time. (_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), datadir, from, next, log.clone())?; - migrate_schema::(db, datadir, next, to, log) + migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; + migrate_schema::(db, datadir, next, to, log, spec) + } + // Downgrade across multiple versions by recursively migrating one step at a time. 
+ (_, _) if to.as_u64() + 1 < from.as_u64() => { + let next = SchemaVersion(from.as_u64() - 1); + migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; + migrate_schema::(db, datadir, next, to, log, spec) } // @@ -98,6 +112,7 @@ pub fn migrate_schema( migration_schema_v7::update_with_reinitialized_fork_choice::( &mut persisted_fork_choice_v7, db.clone(), + spec, ) .map_err(StoreError::SchemaMigrationError)?; } @@ -136,6 +151,71 @@ pub fn migrate_schema( migration_schema_v9::downgrade_from_v9::(db.clone(), log)?; db.store_schema_version(to) } + (SchemaVersion(9), SchemaVersion(10)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?; + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)?); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + (SchemaVersion(10), SchemaVersion(9)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = migration_schema_v10::downgrade_fork_choice(fork_choice)?; + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)?); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + // Upgrade from v10 to v11 adding support for equivocating indices to fork choice. + (SchemaVersion(10), SchemaVersion(11)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = migration_schema_v11::update_fork_choice(fork_choice); + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)?); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + // Downgrade from v11 to v10 removing support for equivocating indices from fork choice. 
+ (SchemaVersion(11), SchemaVersion(10)) => { + let mut ops = vec![]; + let fork_choice_opt = db.get_item::(&FORK_CHOICE_DB_KEY)?; + if let Some(fork_choice) = fork_choice_opt { + let updated_fork_choice = + migration_schema_v11::downgrade_fork_choice(fork_choice, log); + + ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)?); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + // Upgrade from v11 to v12 to store richer metadata in the attestation op pool. + (SchemaVersion(11), SchemaVersion(12)) => { + let ops = migration_schema_v12::upgrade_to_v12::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + // Downgrade from v12 to v11 to drop richer metadata from the attestation op pool. + (SchemaVersion(12), SchemaVersion(11)) => { + let ops = migration_schema_v12::downgrade_from_v12::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs new file mode 100644 index 0000000000..70e0007851 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v10.rs @@ -0,0 +1,97 @@ +use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV8}; +use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV8}; +use crate::schema_change::{ + types::{SszContainerV10, SszContainerV7}, + StoreError, +}; +use proto_array::core::SszContainer; +use ssz::{Decode, Encode}; + +pub fn update_fork_choice( + mut fork_choice: PersistedForkChoiceV8, +) -> Result { + let ssz_container_v7 = SszContainerV7::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + StoreError::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) 
+ })?; + + // These transformations instantiate `node.unrealized_justified_checkpoint` and + // `node.unrealized_finalized_checkpoint` to `None`. + let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); + let ssz_container: SszContainer = ssz_container_v10.into(); + fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +pub fn downgrade_fork_choice( + mut fork_choice: PersistedForkChoiceV10, +) -> Result { + let ssz_container_v10 = SszContainerV10::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + StoreError::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + let ssz_container_v7: SszContainerV7 = ssz_container_v10.into(); + fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +impl From for PersistedForkChoiceStoreV10 { + fn from(other: PersistedForkChoiceStoreV8) -> Self { + Self { + balances_cache: other.balances_cache, + time: other.time, + finalized_checkpoint: other.finalized_checkpoint, + justified_checkpoint: other.justified_checkpoint, + justified_balances: other.justified_balances, + best_justified_checkpoint: other.best_justified_checkpoint, + unrealized_justified_checkpoint: other.best_justified_checkpoint, + unrealized_finalized_checkpoint: other.finalized_checkpoint, + proposer_boost_root: other.proposer_boost_root, + } + } +} + +impl From for PersistedForkChoiceV10 { + fn from(other: PersistedForkChoiceV8) -> Self { + Self { + fork_choice: other.fork_choice, + fork_choice_store: other.fork_choice_store.into(), + } + } +} + +impl From for PersistedForkChoiceStoreV8 { + fn from(other: PersistedForkChoiceStoreV10) -> Self { + Self { + balances_cache: other.balances_cache, + time: other.time, + finalized_checkpoint: other.finalized_checkpoint, + justified_checkpoint: other.justified_checkpoint, + justified_balances: 
other.justified_balances, + best_justified_checkpoint: other.best_justified_checkpoint, + proposer_boost_root: other.proposer_boost_root, + } + } +} + +impl From for PersistedForkChoiceV8 { + fn from(other: PersistedForkChoiceV10) -> Self { + Self { + fork_choice: other.fork_choice, + fork_choice_store: other.fork_choice_store.into(), + } + } +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs new file mode 100644 index 0000000000..dde80a5cac --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v11.rs @@ -0,0 +1,77 @@ +use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11}; +use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV11}; +use slog::{warn, Logger}; +use std::collections::BTreeSet; + +/// Add the equivocating indices field. +pub fn update_fork_choice(fork_choice_v10: PersistedForkChoiceV10) -> PersistedForkChoiceV11 { + let PersistedForkChoiceStoreV10 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + } = fork_choice_v10.fork_choice_store; + + PersistedForkChoiceV11 { + fork_choice: fork_choice_v10.fork_choice, + fork_choice_store: PersistedForkChoiceStoreV11 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + equivocating_indices: BTreeSet::new(), + }, + } +} + +pub fn downgrade_fork_choice( + fork_choice_v11: PersistedForkChoiceV11, + log: Logger, +) -> PersistedForkChoiceV10 { + let PersistedForkChoiceStoreV11 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + 
best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + equivocating_indices, + } = fork_choice_v11.fork_choice_store; + + if !equivocating_indices.is_empty() { + warn!( + log, + "Deleting slashed validators from fork choice store"; + "count" => equivocating_indices.len(), + "message" => "this may make your node more susceptible to following the wrong chain", + ); + } + + PersistedForkChoiceV10 { + fork_choice: fork_choice_v11.fork_choice, + fork_choice_store: PersistedForkChoiceStoreV10 { + balances_cache, + time, + finalized_checkpoint, + justified_checkpoint, + justified_balances, + best_justified_checkpoint, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + proposer_boost_root, + }, + } +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs new file mode 100644 index 0000000000..17ca06dd46 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs @@ -0,0 +1,226 @@ +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}; +use crate::persisted_fork_choice::PersistedForkChoiceV11; +use operation_pool::{PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5}; +use slog::{debug, info, Logger}; +use state_processing::{ + common::get_indexed_attestation, per_block_processing::is_valid_indexed_attestation, + VerifyOperation, VerifySignatures, +}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_to_v12( + db: Arc>, + log: Logger, +) -> Result, Error> { + let spec = db.get_chain_spec(); + + // Load a V5 op pool and transform it to V12. + let PersistedOperationPoolV5 { + attestations_v5, + sync_contributions, + attester_slashings_v5, + proposer_slashings_v5, + voluntary_exits_v5, + } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? 
{ + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + // Load the persisted fork choice so we can grab the state of the justified block and use + // it to verify the stored attestations, slashings and exits. + let fork_choice = db + .get_item::(&FORK_CHOICE_DB_KEY)? + .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; + let justified_block_root = fork_choice + .fork_choice_store + .unrealized_justified_checkpoint + .root; + let justified_block = db + .get_blinded_block(&justified_block_root)? + .ok_or_else(|| { + Error::SchemaMigrationError(format!( + "unrealized justified block missing for migration: {justified_block_root:?}", + )) + })?; + let justified_state_root = justified_block.state_root(); + let mut state = db + .get_state(&justified_state_root, Some(justified_block.slot()))? + .ok_or_else(|| { + Error::SchemaMigrationError(format!( + "justified state missing for migration: {justified_state_root:?}" + )) + })?; + state.build_all_committee_caches(spec).map_err(|e| { + Error::SchemaMigrationError(format!("unable to build committee caches: {e:?}")) + })?; + + // Re-verify attestations while adding attesting indices. 
+ let attestations = attestations_v5 + .into_iter() + .flat_map(|(_, attestations)| attestations) + .filter_map(|attestation| { + let res = state + .get_beacon_committee(attestation.data.slot, attestation.data.index) + .map_err(Into::into) + .and_then(|committee| get_indexed_attestation(committee.committee, &attestation)) + .and_then(|indexed_attestation| { + is_valid_indexed_attestation( + &state, + &indexed_attestation, + VerifySignatures::True, + spec, + )?; + Ok(indexed_attestation) + }); + + match res { + Ok(indexed) => Some((attestation, indexed.attesting_indices.into())), + Err(e) => { + debug!( + log, + "Dropping attestation on migration"; + "err" => ?e, + "head_block" => ?attestation.data.beacon_block_root, + ); + None + } + } + }) + .collect::>(); + + let attester_slashings = attester_slashings_v5 + .iter() + .filter_map(|(slashing, _)| { + slashing + .clone() + .validate(&state, spec) + .map_err(|e| { + debug!( + log, + "Dropping attester slashing on migration"; + "err" => ?e, + "slashing" => ?slashing, + ); + }) + .ok() + }) + .collect::>(); + + let proposer_slashings = proposer_slashings_v5 + .iter() + .filter_map(|slashing| { + slashing + .clone() + .validate(&state, spec) + .map_err(|e| { + debug!( + log, + "Dropping proposer slashing on migration"; + "err" => ?e, + "slashing" => ?slashing, + ); + }) + .ok() + }) + .collect::>(); + + let voluntary_exits = voluntary_exits_v5 + .iter() + .filter_map(|exit| { + exit.clone() + .validate(&state, spec) + .map_err(|e| { + debug!( + log, + "Dropping voluntary exit on migration"; + "err" => ?e, + "exit" => ?exit, + ); + }) + .ok() + }) + .collect::>(); + + debug!( + log, + "Migrated op pool"; + "attestations" => attestations.len(), + "attester_slashings" => attester_slashings.len(), + "proposer_slashings" => proposer_slashings.len(), + "voluntary_exits" => voluntary_exits.len() + ); + + let v12 = PersistedOperationPool::V12(PersistedOperationPoolV12 { + attestations, + sync_contributions, + 
attester_slashings, + proposer_slashings, + voluntary_exits, + }); + Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)?]) +} + +pub fn downgrade_from_v12( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Load a V12 op pool and transform it to V5. + let PersistedOperationPoolV12 { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + } = if let Some(PersistedOperationPool::::V12(op_pool)) = + db.get_item(&OP_POOL_DB_KEY)? + { + op_pool + } else { + debug!(log, "Nothing to do, no operation pool stored"); + return Ok(vec![]); + }; + + info!( + log, + "Dropping attestations from pool"; + "count" => attestations.len(), + ); + + let attester_slashings_v5 = attester_slashings + .into_iter() + .filter_map(|slashing| { + let fork_version = slashing.first_fork_verified_against()?; + Some((slashing.into_inner(), fork_version)) + }) + .collect::>(); + + let proposer_slashings_v5 = proposer_slashings + .into_iter() + .map(|slashing| slashing.into_inner()) + .collect::>(); + + let voluntary_exits_v5 = voluntary_exits + .into_iter() + .map(|exit| exit.into_inner()) + .collect::>(); + + info!( + log, + "Migrated slashings and exits"; + "attester_slashings" => attester_slashings_v5.len(), + "proposer_slashings" => proposer_slashings_v5.len(), + "voluntary_exits" => voluntary_exits_v5.len(), + ); + + let v5 = PersistedOperationPoolV5 { + attestations_v5: vec![], + sync_contributions, + attester_slashings_v5, + proposer_slashings_v5, + voluntary_exits_v5, + }; + Ok(vec![v5.as_kv_store_op(OP_POOL_DB_KEY)?]) +} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs index 4cede798ea..4a9a78db7b 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v7.rs @@ -2,12 +2,11 @@ use crate::beacon_chain::BeaconChainTypes; use 
crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; -use crate::schema_change::types::{ProtoNodeV6, SszContainerV6, SszContainerV7}; -use crate::types::{Checkpoint, Epoch, Hash256}; -use crate::types::{EthSpec, Slot}; +use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7}; +use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use fork_choice::ForkChoice; -use proto_array::{core::ProtoNode, core::SszContainer, ProtoArrayForkChoice}; +use proto_array::{core::ProtoNode, core::SszContainer, CountUnrealizedFull, ProtoArrayForkChoice}; use ssz::four_byte_option_impl; use ssz::{Decode, Encode}; use std::collections::{HashMap, HashSet}; @@ -25,6 +24,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); pub(crate) fn update_with_reinitialized_fork_choice( persisted_fork_choice: &mut PersistedForkChoiceV7, db: Arc>, + spec: &ChainSpec, ) -> Result<(), String> { let anchor_block_root = persisted_fork_choice .fork_choice_store @@ -39,7 +39,7 @@ pub(crate) fn update_with_reinitialized_fork_choice( .map_err(|e| format!("{:?}", e))? .ok_or_else(|| "Missing anchor beacon state".to_string())?; let snapshot = BeaconSnapshot { - beacon_block: anchor_block, + beacon_block: Arc::new(anchor_block), beacon_block_root: anchor_block_root, beacon_state: anchor_state, }; @@ -49,6 +49,12 @@ pub(crate) fn update_with_reinitialized_fork_choice( anchor_block_root, &snapshot.beacon_block, &snapshot.beacon_state, + // Don't provide the current slot here, just use what's in the store. We don't need to know + // the head here, plus it's nice to avoid mutating fork choice during this process. + None, + // This config will get overwritten on startup. 
+ CountUnrealizedFull::default(), + spec, ) .map_err(|e| format!("{:?}", e))?; persisted_fork_choice.fork_choice = fork_choice.to_persisted(); @@ -82,8 +88,11 @@ pub(crate) fn update_fork_choice( // to `None`. let ssz_container_v7: SszContainerV7 = ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint); - let ssz_container: SszContainer = ssz_container_v7.into(); - let mut fork_choice: ProtoArrayForkChoice = ssz_container.into(); + let ssz_container_v10: SszContainerV10 = ssz_container_v7.into(); + let ssz_container: SszContainer = ssz_container_v10.into(); + // `CountUnrealizedFull::default()` represents the count-unrealized-full config which will be overwritten on startup. + let mut fork_choice: ProtoArrayForkChoice = + (ssz_container, CountUnrealizedFull::default()).into(); update_checkpoints::(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db) .map_err(StoreError::SchemaMigrationError)?; @@ -93,6 +102,13 @@ pub(crate) fn update_fork_choice( update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice) .map_err(StoreError::SchemaMigrationError)?; + // Need to downgrade the SSZ container to V7 so that all migrations can be applied in sequence. 
+ let ssz_container = SszContainer::from(&fork_choice); + let ssz_container_v7 = SszContainerV7::from(ssz_container); + + persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes(); + persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; + Ok(()) } @@ -297,8 +313,6 @@ fn update_store_justified_checkpoint( .ok_or("Proto node with current finalized checkpoint not found")?; fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint; - persisted_fork_choice.fork_choice.proto_array_bytes = fork_choice.as_bytes(); - persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint; Ok(()) } diff --git a/beacon_node/beacon_chain/src/schema_change/types.rs b/beacon_node/beacon_chain/src/schema_change/types.rs index 8d41a384f6..02a54c1a3f 100644 --- a/beacon_node/beacon_chain/src/schema_change/types.rs +++ b/beacon_node/beacon_chain/src/schema_change/types.rs @@ -12,7 +12,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); #[superstruct( - variants(V1, V6, V7), + variants(V1, V6, V7, V10), variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)), no_enum )] @@ -30,18 +30,24 @@ pub struct ProtoNode { #[superstruct(only(V1, V6))] pub finalized_epoch: Epoch, #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub justified_checkpoint: Option, #[ssz(with = "four_byte_option_checkpoint")] - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub finalized_checkpoint: Option, pub weight: u64, #[ssz(with = "four_byte_option_usize")] pub best_child: Option, #[ssz(with = "four_byte_option_usize")] pub best_descendant: Option, - #[superstruct(only(V6, V7))] + #[superstruct(only(V6, V7, V10))] pub execution_status: ExecutionStatus, + #[ssz(with = "four_byte_option_checkpoint")] + #[superstruct(only(V10))] + pub unrealized_justified_checkpoint: Option, 
+ #[ssz(with = "four_byte_option_checkpoint")] + #[superstruct(only(V10))] + pub unrealized_finalized_checkpoint: Option, } impl Into for ProtoNodeV1 { @@ -88,9 +94,31 @@ impl Into for ProtoNodeV6 { } } -impl Into for ProtoNodeV7 { - fn into(self) -> ProtoNode { - ProtoNode { +impl Into for ProtoNodeV7 { + fn into(self) -> ProtoNodeV10 { + ProtoNodeV10 { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, + } + } +} + +impl Into for ProtoNodeV10 { + fn into(self) -> ProtoNodeV7 { + ProtoNodeV7 { slot: self.slot, state_root: self.state_root, target_root: self.target_root, @@ -108,8 +136,50 @@ impl Into for ProtoNodeV7 { } } +impl Into for ProtoNodeV10 { + fn into(self) -> ProtoNode { + ProtoNode { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + } + } +} + +impl From for ProtoNodeV7 { + fn from(container: ProtoNode) -> Self { + Self { + slot: container.slot, + state_root: 
container.state_root, + target_root: container.target_root, + current_epoch_shuffling_id: container.current_epoch_shuffling_id, + next_epoch_shuffling_id: container.next_epoch_shuffling_id, + root: container.root, + parent: container.parent, + justified_checkpoint: container.justified_checkpoint, + finalized_checkpoint: container.finalized_checkpoint, + weight: container.weight, + best_child: container.best_child, + best_descendant: container.best_descendant, + execution_status: container.execution_status, + } + } +} + #[superstruct( - variants(V1, V6, V7), + variants(V1, V6, V7, V10), variant_attributes(derive(Encode, Decode)), no_enum )] @@ -122,9 +192,9 @@ pub struct SszContainer { pub justified_epoch: Epoch, #[superstruct(only(V1, V6))] pub finalized_epoch: Epoch, - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub justified_checkpoint: Checkpoint, - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub finalized_checkpoint: Checkpoint, #[superstruct(only(V1))] pub nodes: Vec, @@ -132,8 +202,10 @@ pub struct SszContainer { pub nodes: Vec, #[superstruct(only(V7))] pub nodes: Vec, + #[superstruct(only(V10))] + pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, - #[superstruct(only(V7))] + #[superstruct(only(V7, V10))] pub previous_proposer_boost: ProposerBoost, } @@ -174,7 +246,41 @@ impl SszContainerV6 { } } -impl Into for SszContainerV7 { +impl Into for SszContainerV7 { + fn into(self) -> SszContainerV10 { + let nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV10 { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + } + } +} + +impl Into for SszContainerV10 { + fn into(self) -> SszContainerV7 { + let nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV7 { + votes: 
self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + } + } +} + +impl Into for SszContainerV10 { fn into(self) -> SszContainer { let nodes = self.nodes.into_iter().map(Into::into).collect(); @@ -190,3 +296,20 @@ impl Into for SszContainerV7 { } } } + +impl From for SszContainerV7 { + fn from(container: SszContainer) -> Self { + let nodes = container.nodes.into_iter().map(Into::into).collect(); + + Self { + votes: container.votes, + balances: container.balances, + prune_threshold: container.prune_threshold, + justified_checkpoint: container.justified_checkpoint, + finalized_checkpoint: container.finalized_checkpoint, + nodes, + indices: container.indices, + previous_proposer_boost: container.previous_proposer_boost, + } + } +} diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 5a287daf0f..0bbd4419b9 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -47,6 +47,12 @@ impl ShufflingCache { } } +impl Default for ShufflingCache { + fn default() -> Self { + Self::new() + } +} + /// Contains the shuffling IDs for a beacon block. pub struct BlockShufflingIds { pub current: AttestationShufflingId, diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index ed1df94677..7593557396 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -35,6 +35,13 @@ use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, Relative /// for some period of time. const MAX_ADVANCE_DISTANCE: u64 = 4; +/// Similarly for fork choice: avoid the fork choice lookahead during sync. 
+/// +/// The value is set to 256 since this would be just over one slot (12.8s) when syncing at +/// 20 slots/second. Having a single fork-choice run interrupt syncing would have very little +/// impact whilst having 8 epochs without a block is a comfortable grace period. +const MAX_FORK_CHOICE_DISTANCE: u64 = 256; + #[derive(Debug)] enum Error { BeaconChain(BeaconChainError), @@ -222,28 +229,33 @@ async fn state_advance_timer( let log = log.clone(); let beacon_chain = beacon_chain.clone(); let next_slot = current_slot + 1; - executor.spawn_blocking( - move || { - if let Err(e) = beacon_chain.fork_choice_at_slot(next_slot) { - warn!( - log, - "Error updating fork choice for next slot"; - "error" => ?e, - "slot" => next_slot, - ); + executor.spawn( + async move { + // Don't run fork choice during sync. + if beacon_chain.best_slot() + MAX_FORK_CHOICE_DISTANCE < current_slot { + return; } - // Signal block proposal for the next slot (if it happens to be waiting). - if let Some(tx) = &beacon_chain.fork_choice_signal_tx { - if let Err(e) = tx.notify_fork_choice_complete(next_slot) { - warn!( - log, - "Error signalling fork choice waiter"; - "error" => ?e, - "slot" => next_slot, - ); - } - } + beacon_chain.recompute_head_at_slot(next_slot).await; + + // Use a blocking task to avoid blocking the core executor whilst waiting for locks + // in `ForkChoiceSignalTx`. + beacon_chain.task_executor.clone().spawn_blocking( + move || { + // Signal block proposal for the next slot (if it happens to be waiting). + if let Some(tx) = &beacon_chain.fork_choice_signal_tx { + if let Err(e) = tx.notify_fork_choice_complete(next_slot) { + warn!( + log, + "Error signalling fork choice waiter"; + "error" => ?e, + "slot" => next_slot, + ); + } + } + }, + "fork_choice_advance_signal_tx", + ); }, "fork_choice_advance", ); @@ -261,7 +273,7 @@ fn advance_head( // // Fork-choice is not run *before* this function to avoid unnecessary calls whilst syncing. 
{ - let head_slot = beacon_chain.head_info()?.slot; + let head_slot = beacon_chain.best_slot(); // Don't run this when syncing or if lagging too far behind. if head_slot + MAX_ADVANCE_DISTANCE < current_slot { @@ -272,12 +284,14 @@ fn advance_head( } } - let head_info = beacon_chain.head_info()?; - let head_block_root = head_info.block_root; + let (head_block_root, head_block_state_root) = { + let snapshot = beacon_chain.head_snapshot(); + (snapshot.beacon_block_root, snapshot.beacon_state_root()) + }; let (head_state_root, mut state) = beacon_chain .store - .get_advanced_state(head_block_root, current_slot, head_info.state_root)? + .get_advanced_state(head_block_root, current_slot, head_block_state_root)? .ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?; if state.slot() == current_slot + 1 { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1fe1cec983..170eba85fe 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -11,10 +11,15 @@ use crate::{ StateSkipConfig, }; use bls::get_withdrawal_credentials; +use execution_layer::test_utils::DEFAULT_JWT_SECRET; use execution_layer::{ - test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK}, + auth::JwtKey, + test_utils::{ + ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK, + }, ExecutionLayer, }; +use fork_choice::CountUnrealized; use futures::channel::mpsc::Receiver; pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; @@ -28,9 +33,11 @@ use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slog::Logger; use slot_clock::TestingSlotClock; +use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::fmt; use std::str::FromStr; use 
std::sync::Arc; use std::time::Duration; @@ -122,8 +129,7 @@ pub fn test_spec() -> ChainSpec { FORK_NAME_ENV_VAR, e ) }); - let fork = ForkName::from_str(fork_name.as_str()) - .unwrap_or_else(|()| panic!("unknown FORK_NAME: {}", fork_name)); + let fork = ForkName::from_str(fork_name.as_str()).unwrap(); fork.make_genesis_spec(E::default_spec()) } else { E::default_spec() @@ -144,8 +150,10 @@ pub struct Builder { store: Option>>, initial_mutator: Option>, store_mutator: Option>, - execution_layer: Option, + execution_layer: Option>, mock_execution_layer: Option>, + mock_builder: Option>, + testing_slot_clock: Option, runtime: TestRuntime, log: Logger, } @@ -203,6 +211,20 @@ impl Builder> { self.store = Some(store); self.store_mutator(Box::new(mutator)) } + + /// Manually restore from a given `MemoryStore`. + pub fn resumed_ephemeral_store( + mut self, + store: Arc, MemoryStore>>, + ) -> Self { + let mutator = move |builder: BeaconChainBuilder<_>| { + builder + .resume_from_db() + .expect("should resume from database") + }; + self.store = Some(store); + self.store_mutator(Box::new(mutator)) + } } impl Builder> { @@ -263,6 +285,8 @@ where store_mutator: None, execution_layer: None, mock_execution_layer: None, + mock_builder: None, + testing_slot_clock: None, runtime, log, } @@ -358,12 +382,46 @@ where DEFAULT_TERMINAL_BLOCK, spec.terminal_block_hash, spec.terminal_block_hash_activation_epoch, + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + None, ); self.execution_layer = Some(mock.el.clone()); self.mock_execution_layer = Some(mock); self } + pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self { + // Get a random unused port + let port = unused_port::unused_tcp_port().unwrap(); + let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); + + let spec = self.spec.clone().expect("cannot build without spec"); + let mock_el = MockExecutionLayer::new( + self.runtime.task_executor.clone(), + 
spec.terminal_total_difficulty, + DEFAULT_TERMINAL_BLOCK, + spec.terminal_block_hash, + spec.terminal_block_hash_activation_epoch, + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + Some(builder_url.clone()), + ) + .move_to_terminal_block(); + + let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); + + self.mock_builder = Some(TestingBuilder::new( + mock_el_url, + builder_url, + beacon_url, + spec, + self.runtime.task_executor.clone(), + )); + self.execution_layer = Some(mock_el.el.clone()); + self.mock_execution_layer = Some(mock_el); + + self + } + /// Instruct the mock execution engine to always return a "valid" response to any payload it is /// asked to execute. pub fn mock_execution_layer_all_payloads_valid(self) -> Self { @@ -375,6 +433,11 @@ where self } + pub fn testing_slot_clock(mut self, slot_clock: TestingSlotClock) -> Self { + self.testing_slot_clock = Some(slot_clock); + self + } + pub fn build(self) -> BeaconChainHarness> { let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); @@ -415,7 +478,9 @@ where }; // Initialize the slot clock only if it hasn't already been initialized. 
- builder = if builder.get_slot_clock().is_none() { + builder = if let Some(testing_slot_clock) = self.testing_slot_clock { + builder.slot_clock(testing_slot_clock) + } else if builder.get_slot_clock().is_none() { builder .testing_slot_clock(Duration::from_secs(seconds_per_slot)) .expect("should configure testing slot clock") @@ -432,6 +497,7 @@ where shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, + mock_builder: self.mock_builder.map(Arc::new), rng: make_rng(), } } @@ -450,6 +516,7 @@ pub struct BeaconChainHarness { pub runtime: TestRuntime, pub mock_execution_layer: Option>, + pub mock_builder: Option>>, pub rng: Mutex, } @@ -511,13 +578,40 @@ where } pub fn get_current_state(&self) -> BeaconState { - self.chain.head().unwrap().beacon_state + self.chain.head_beacon_state_cloned() + } + + pub fn get_timestamp_at_slot(&self) -> u64 { + let state = self.get_current_state(); + compute_timestamp_at_slot(&state, &self.spec).unwrap() } pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { - let head = self.chain.head().unwrap(); + let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); - (head.beacon_state, state_root) + (head.beacon_state.clone(), state_root) + } + + pub fn head_slot(&self) -> Slot { + self.chain.canonical_head.cached_head().head_slot() + } + + pub fn head_block_root(&self) -> Hash256 { + self.chain.canonical_head.cached_head().head_block_root() + } + + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.chain + .canonical_head + .cached_head() + .finalized_checkpoint() + } + + pub fn justified_checkpoint(&self) -> Checkpoint { + self.chain + .canonical_head + .cached_head() + .justified_checkpoint() } pub fn get_current_slot(&self) -> Slot { @@ -562,7 +656,7 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } - pub fn make_block( + pub async fn make_block( &self, mut state: 
BeaconState, slot: Slot, @@ -596,6 +690,7 @@ where Some(graffiti), ProduceBlockVerification::VerifyRandao, ) + .await .unwrap(); let signed_block = block.sign( @@ -610,7 +705,7 @@ where /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after /// caches are built but before the generated block is processed. - pub fn make_block_return_pre_state( + pub async fn make_block_return_pre_state( &self, mut state: BeaconState, slot: Slot, @@ -646,6 +741,7 @@ where Some(graffiti), ProduceBlockVerification::VerifyRandao, ) + .await .unwrap(); let signed_block = block.sign( @@ -1072,6 +1168,19 @@ where } pub fn make_attester_slashing(&self, validator_indices: Vec) -> AttesterSlashing { + self.make_attester_slashing_with_epochs(validator_indices, None, None, None, None) + } + + pub fn make_attester_slashing_with_epochs( + &self, + validator_indices: Vec, + source1: Option, + target1: Option, + source2: Option, + target2: Option, + ) -> AttesterSlashing { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let mut attestation_1 = IndexedAttestation { attesting_indices: VariableList::new(validator_indices).unwrap(), data: AttestationData { @@ -1080,11 +1189,11 @@ where beacon_block_root: Hash256::zero(), target: Checkpoint { root: Hash256::zero(), - epoch: Epoch::new(0), + epoch: target1.unwrap_or(fork.epoch), }, source: Checkpoint { root: Hash256::zero(), - epoch: Epoch::new(0), + epoch: source1.unwrap_or(Epoch::new(0)), }, }, signature: AggregateSignature::infinity(), @@ -1092,12 +1201,13 @@ where let mut attestation_2 = attestation_1.clone(); attestation_2.data.index += 1; + attestation_2.data.source.epoch = source2.unwrap_or(Epoch::new(0)); + attestation_2.data.target.epoch = target2.unwrap_or(fork.epoch); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; - let fork = self.chain.head_info().unwrap().fork; let 
genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( @@ -1151,11 +1261,11 @@ where attestation_2.data.index += 1; + let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; - let fork = self.chain.head_info().unwrap().fork; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( @@ -1177,19 +1287,25 @@ where } pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { - let mut block_header_1 = self - .chain - .head_beacon_block() - .unwrap() - .message() - .block_header(); + self.make_proposer_slashing_at_slot(validator_index, None) + } + + pub fn make_proposer_slashing_at_slot( + &self, + validator_index: u64, + slot_override: Option, + ) -> ProposerSlashing { + let mut block_header_1 = self.chain.head_beacon_block().message().block_header(); block_header_1.proposer_index = validator_index; + if let Some(slot) = slot_override { + block_header_1.slot = slot; + } let mut block_header_2 = block_header_1.clone(); block_header_2.state_root = Hash256::zero(); let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; let mut signed_block_headers = vec![block_header_1, block_header_2] @@ -1207,7 +1323,7 @@ where pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { let sk = &self.validator_keypairs[validator_index as usize].sk; - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; VoluntaryExit { @@ -1230,7 +1346,7 @@ where /// Create 
a new block, apply `block_modifier` to it, sign it and return it. /// /// The state returned is a pre-block state at the same slot as the produced block. - pub fn make_block_with_modifier( + pub async fn make_block_with_modifier( &self, state: BeaconState, slot: Slot, @@ -1239,7 +1355,7 @@ where assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); - let (block, state) = self.make_block_return_pre_state(state, slot); + let (block, state) = self.make_block_return_pre_state(state, slot).await; let (mut block, _) = block.deconstruct(); block_modifier(&mut block); @@ -1327,23 +1443,31 @@ where (deposits, state) } - pub fn process_block( + pub async fn process_block( &self, slot: Slot, block: SignedBeaconBlock, ) -> Result> { self.set_current_slot(slot); - let block_hash: SignedBeaconBlockHash = self.chain.process_block(block)?.into(); - self.chain.fork_choice()?; + let block_hash: SignedBeaconBlockHash = self + .chain + .process_block(Arc::new(block), CountUnrealized::True) + .await? + .into(); + self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } - pub fn process_block_result( + pub async fn process_block_result( &self, block: SignedBeaconBlock, ) -> Result> { - let block_hash: SignedBeaconBlockHash = self.chain.process_block(block)?.into(); - self.chain.fork_choice().unwrap(); + let block_hash: SignedBeaconBlockHash = self + .chain + .process_block(Arc::new(block), CountUnrealized::True) + .await? 
+ .into(); + self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -1382,7 +1506,7 @@ where self.chain .apply_attestation_to_fork_choice(&verified) .unwrap(); - self.chain.add_to_block_inclusion_pool(&verified).unwrap(); + self.chain.add_to_block_inclusion_pool(verified).unwrap(); } } @@ -1398,14 +1522,14 @@ where self.chain.slot_clock.set_slot(slot.into()); } - pub fn add_block_at_slot( + pub async fn add_block_at_slot( &self, slot: Slot, state: BeaconState, ) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock, BeaconState), BlockError> { self.set_current_slot(slot); - let (block, new_state) = self.make_block(state, slot); - let block_hash = self.process_block(slot, block.clone())?; + let (block, new_state) = self.make_block(state, slot).await; + let block_hash = self.process_block(slot, block.clone()).await?; Ok((block_hash, block, new_state)) } @@ -1422,19 +1546,19 @@ where self.process_attestations(attestations); } - pub fn add_attested_block_at_slot( + pub async fn add_attested_block_at_slot( &self, slot: Slot, state: BeaconState, state_root: Hash256, validators: &[usize], ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { - let (block_hash, block, state) = self.add_block_at_slot(slot, state)?; + let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?; self.attest_block(&state, state_root, block_hash, &block, validators); Ok((block_hash, state)) } - pub fn add_attested_blocks_at_slots( + pub async fn add_attested_blocks_at_slots( &self, state: BeaconState, state_root: Hash256, @@ -1443,9 +1567,10 @@ where ) -> AddBlocksResult { assert!(!slots.is_empty()); self.add_attested_blocks_at_slots_given_lbh(state, state_root, slots, validators, None) + .await } - fn add_attested_blocks_at_slots_given_lbh( + async fn add_attested_blocks_at_slots_given_lbh( &self, mut state: BeaconState, state_root: Hash256, @@ -1462,6 +1587,7 @@ where for slot in slots { let (block_hash, new_state) = self 
.add_attested_block_at_slot(*slot, state, state_root, validators) + .await .unwrap(); state = new_state; block_hash_from_slot.insert(*slot, block_hash); @@ -1483,7 +1609,7 @@ where /// epoch at a time. /// /// Chains is a vec of `(state, slots, validators)` tuples. - pub fn add_blocks_on_multiple_chains( + pub async fn add_blocks_on_multiple_chains( &self, chains: Vec<(BeaconState, Vec, Vec)>, ) -> Vec> { @@ -1542,7 +1668,8 @@ where &epoch_slots, &validators, Some(head_block), - ); + ) + .await; block_hashes.extend(new_block_hashes); state_hashes.extend(new_state_hashes); @@ -1591,18 +1718,18 @@ where /// Deprecated: Use make_block() instead /// /// Returns a newly created block, signed by the proposer for the given slot. - pub fn build_block( + pub async fn build_block( &self, state: BeaconState, slot: Slot, _block_strategy: BlockStrategy, ) -> (SignedBeaconBlock, BeaconState) { - self.make_block(state, slot) + self.make_block(state, slot).await } /// Uses `Self::extend_chain` to build the chain out to the `target_slot`. - pub fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { - if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } @@ -1613,7 +1740,7 @@ where .checked_add(1) .unwrap(); - self.extend_slots(num_slots) + self.extend_slots(num_slots).await } /// Uses `Self::extend_chain` to `num_slots` blocks. 
@@ -1622,8 +1749,8 @@ where /// /// - BlockStrategy::OnCanonicalHead, /// - AttestationStrategy::AllValidators, - pub fn extend_slots(&self, num_slots: usize) -> Hash256 { - if self.chain.slot().unwrap() == self.chain.head_info().unwrap().slot { + pub async fn extend_slots(&self, num_slots: usize) -> Hash256 { + if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } @@ -1632,6 +1759,7 @@ where BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, ) + .await } /// Deprecated: Use add_attested_blocks_at_slots() instead @@ -1645,7 +1773,7 @@ where /// /// The `attestation_strategy` dictates which validators will attest to the newly created /// blocks. - pub fn extend_chain( + pub async fn extend_chain( &self, num_blocks: usize, block_strategy: BlockStrategy, @@ -1680,8 +1808,9 @@ where AttestationStrategy::SomeValidators(vals) => vals, }; let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, last_produced_block_hash, _) = - self.add_attested_blocks_at_slots(state, state_root, &slots, &validators); + let (_, _, last_produced_block_hash, _) = self + .add_attested_blocks_at_slots(state, state_root, &slots, &validators) + .await; last_produced_block_hash.into() } @@ -1695,44 +1824,50 @@ where /// then built `faulty_fork_blocks`. /// /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain. - pub fn generate_two_forks_by_skipping_a_block( + pub async fn generate_two_forks_by_skipping_a_block( &self, honest_validators: &[usize], faulty_validators: &[usize], honest_fork_blocks: usize, faulty_fork_blocks: usize, ) -> (Hash256, Hash256) { - let initial_head_slot = self - .chain - .head() - .expect("should get head") - .beacon_block - .slot(); + let initial_head_slot = self.chain.head_snapshot().beacon_block.slot(); // Move to the next slot so we may produce some more blocks on the head. 
self.advance_slot(); // Extend the chain with blocks where only honest validators agree. - let honest_head = self.extend_chain( - honest_fork_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(honest_validators.to_vec()), - ); + let honest_head = self + .extend_chain( + honest_fork_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(honest_validators.to_vec()), + ) + .await; // Go back to the last block where all agreed, and build blocks upon it where only faulty nodes // agree. - let faulty_head = self.extend_chain( - faulty_fork_blocks, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: initial_head_slot, - // `initial_head_slot + 2` means one slot is skipped. - first_slot: initial_head_slot + 2, - }, - AttestationStrategy::SomeValidators(faulty_validators.to_vec()), - ); + let faulty_head = self + .extend_chain( + faulty_fork_blocks, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: initial_head_slot, + // `initial_head_slot + 2` means one slot is skipped. + first_slot: initial_head_slot + 2, + }, + AttestationStrategy::SomeValidators(faulty_validators.to_vec()), + ) + .await; assert_ne!(honest_head, faulty_head, "forks should be distinct"); (honest_head, faulty_head) } } + +// Junk `Debug` impl to satisfy certain trait bounds during testing. 
+impl fmt::Debug for BeaconChainHarness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "BeaconChainHarness") + } +} diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index f168ec258a..9242ef4b36 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -231,6 +231,11 @@ impl MonitoredValidator { } } } + + /// Ensure epoch summary is added to the summaries map + fn touch_epoch_summary(&self, epoch: Epoch) { + self.with_epoch_summary(epoch, |_| {}); + } } /// Holds a collection of `MonitoredValidator` and is notified about a variety of events on the P2P @@ -306,6 +311,7 @@ impl ValidatorMonitor { // Update metrics for individual validators. for monitored_validator in self.validators.values() { if let Some(i) = monitored_validator.index { + monitored_validator.touch_epoch_summary(current_epoch); let i = i as usize; let id = &monitored_validator.id; diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 4141b7dc01..a72168c5f0 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -158,6 +158,11 @@ impl ValidatorPubkeyCache { pub fn len(&self) -> usize { self.indices.len() } + + /// Returns `true` if there are no validators in the cache. + pub fn is_empty(&self) -> bool { + self.indices.is_empty() + } } /// Wrapper for a public key stored in the database. 
diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index b1d1f71d6c..85e4f1f093 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -3,6 +3,7 @@ use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::{StateSkipConfig, WhenSlotSkipped}; use lazy_static::lazy_static; +use std::sync::Arc; use tree_hash::TreeHash; use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot}; @@ -17,8 +18,8 @@ lazy_static! { /// attestation at each slot from genesis through to three epochs past the head. /// /// It checks the produced attestation against some locally computed values. -#[test] -fn produces_attestations() { +#[tokio::test] +async fn produces_attestations() { let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4; let additional_slots_tested = MainnetEthSpec::slots_per_epoch() * 3; @@ -37,11 +38,13 @@ fn produces_attestations() { if slot > 0 && slot <= num_blocks_produced { harness.advance_slot(); - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } let slot = Slot::from(slot); @@ -129,10 +132,20 @@ fn produces_attestations() { assert_eq!(data.target.root, target_root, "bad target root"); let early_attestation = { - let proto_block = chain.fork_choice.read().get_block(&block_root).unwrap(); + let proto_block = chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block_root) + .unwrap(); chain .early_attester_cache - .add_head_block(block_root, block.clone(), proto_block, &state, &chain.spec) + .add_head_block( + block_root, + Arc::new(block.clone()), + proto_block, + &state, + &chain.spec, + ) .unwrap(); chain .early_attester_cache @@ -151,8 +164,8 
@@ fn produces_attestations() { /// Ensures that the early attester cache wont create an attestation to a block in a later slot than /// the one requested. -#[test] -fn early_attester_cache_old_request() { +#[tokio::test] +async fn early_attester_cache_old_request() { let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .keypairs(KEYPAIRS[..].to_vec()) @@ -162,18 +175,20 @@ fn early_attester_cache_old_request() { harness.advance_slot(); - harness.extend_chain( - 2, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!(head.beacon_block.slot(), 2); let head_proto_block = harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&head.beacon_block_root) .unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 2fe8818a9a..6a9e604793 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -56,7 +56,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness( chain: &BeaconChain, ) -> (Attestation, usize, usize, SecretKey, SubnetId) { - let head = chain.head().expect("should get head"); + let head = chain.head_snapshot(); let current_slot = chain.slot().expect("should get slot"); let mut valid_attestation = chain @@ -106,7 +106,8 @@ fn get_valid_aggregated_attestation( chain: &BeaconChain, aggregate: Attestation, ) -> (SignedAggregateAndProof, usize, SecretKey) { - let state = &chain.head().expect("should get head").beacon_state; + let head = chain.head_snapshot(); + let state = &head.beacon_state; let current_slot = chain.slot().expect("should get slot"); let committee = state @@ -155,7 +156,8 @@ 
fn get_non_aggregator( chain: &BeaconChain, aggregate: &Attestation, ) -> (usize, SecretKey) { - let state = &chain.head().expect("should get head").beacon_state; + let head = chain.head_snapshot(); + let state = &head.beacon_state; let current_slot = chain.slot().expect("should get slot"); let committee = state @@ -213,15 +215,17 @@ struct GossipTester { } impl GossipTester { - pub fn new() -> Self { + pub async fn new() -> Self { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); @@ -395,9 +399,10 @@ impl GossipTester { } } /// Tests verification of `SignedAggregateAndProof` from the gossip network. -#[test] -fn aggregated_gossip_verification() { +#[tokio::test] +async fn aggregated_gossip_verification() { GossipTester::new() + .await /* * The following two tests ensure: * @@ -511,8 +516,7 @@ fn aggregated_gossip_verification() { let committee_len = tester .harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .get_beacon_committee(tester.slot(), a.message.aggregate.data.index) .expect("should get committees") @@ -612,7 +616,7 @@ fn aggregated_gossip_verification() { tester.valid_aggregate.message.aggregate.clone(), None, &sk, - &chain.head_info().unwrap().fork, + &chain.canonical_head.cached_head().head_fork(), chain.genesis_validators_root, &chain.spec, ) @@ -669,9 +673,10 @@ fn aggregated_gossip_verification() { } /// Tests the verification conditions for an unaggregated attestation on the gossip network. 
-#[test] -fn unaggregated_gossip_verification() { +#[tokio::test] +async fn unaggregated_gossip_verification() { GossipTester::new() + .await /* * The following test ensures: * @@ -684,8 +689,7 @@ fn unaggregated_gossip_verification() { a.data.index = tester .harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .get_committee_count_at_slot(a.data.slot) .unwrap() @@ -924,16 +928,18 @@ fn unaggregated_gossip_verification() { /// Ensures that an attestation that skips epochs can still be processed. /// /// This also checks that we can do a state lookup if we don't get a hit from the shuffling cache. -#[test] -fn attestation_that_skips_epochs() { +#[tokio::test] +async fn attestation_that_skips_epochs() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 + 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); let current_epoch = harness.chain.epoch().expect("should get epoch"); @@ -992,16 +998,18 @@ fn attestation_that_skips_epochs() { .expect("should gossip verify attestation that skips slots"); } -#[test] -fn attestation_to_finalized_block() { +#[tokio::test] +async fn attestation_to_finalized_block() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. 
- harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 4 + 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 4 + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let finalized_checkpoint = harness .chain @@ -1067,16 +1075,18 @@ fn attestation_to_finalized_block() { .contains(earlier_block_root)); } -#[test] -fn verify_aggregate_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_aggregate_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. harness.advance_slot(); @@ -1124,16 +1134,18 @@ fn verify_aggregate_for_gossip_doppelganger_detection() { .expect("should check if gossip aggregator was observed")); } -#[test] -fn verify_attestation_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_attestation_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); // Extend the chain out a few epochs so we have some chain depth to play with. - harness.extend_chain( - MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 3 - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Advance into a slot where there have not been blocks or attestations produced. 
harness.advance_slot(); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index f91597c8f6..776faba6c8 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -4,6 +4,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult}; +use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; use slasher::{Config as SlasherConfig, Slasher}; @@ -27,19 +28,18 @@ const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGT lazy_static! { /// A cached set of keys. static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); - - /// A cached set of valid blocks - static ref CHAIN_SEGMENT: Vec> = get_chain_segment(); } -fn get_chain_segment() -> Vec> { +async fn get_chain_segment() -> Vec> { let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - CHAIN_SEGMENT_LENGTH, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + CHAIN_SEGMENT_LENGTH, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness .chain @@ -50,11 +50,14 @@ fn get_chain_segment() -> Vec> { let full_block = harness .chain .store - .make_full_block(&snapshot.beacon_block_root, snapshot.beacon_block) + .make_full_block( + &snapshot.beacon_block_root, + snapshot.beacon_block.as_ref().clone(), + ) .unwrap(); BeaconSnapshot { beacon_block_root: snapshot.beacon_block_root, - beacon_block: full_block, + beacon_block: Arc::new(full_block), beacon_state: snapshot.beacon_state, } }) @@ -75,8 +78,8 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness Vec> { - CHAIN_SEGMENT +fn chain_segment_blocks(chain_segment: &[BeaconSnapshot]) -> Vec>> { + chain_segment 
.iter() .map(|snapshot| snapshot.beacon_block.clone()) .collect() @@ -110,13 +113,13 @@ fn update_proposal_signatures( .get(proposer_index) .expect("proposer keypair should be available"); - let (block, _) = snapshot.beacon_block.clone().deconstruct(); - snapshot.beacon_block = block.sign( + let (block, _) = snapshot.beacon_block.as_ref().clone().deconstruct(); + snapshot.beacon_block = Arc::new(block.sign( &keypair.sk, &state.fork(), state.genesis_validators_root(), spec, - ); + )); } } @@ -124,17 +127,18 @@ fn update_parent_roots(snapshots: &mut [BeaconSnapshot]) { for i in 0..snapshots.len() { let root = snapshots[i].beacon_block.canonical_root(); if let Some(child) = snapshots.get_mut(i + 1) { - let (mut block, signature) = child.beacon_block.clone().deconstruct(); + let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; - child.beacon_block = SignedBeaconBlock::from_block(block, signature) + child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)) } } } -#[test] -fn chain_segment_full_segment() { +#[tokio::test] +async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT); - let blocks = chain_segment_blocks(); + let chain_segment = get_chain_segment().await; + let blocks = chain_segment_blocks(&chain_segment); harness .chain @@ -144,34 +148,33 @@ fn chain_segment_full_segment() { // Sneak in a little check to ensure we can process empty chain segments. 
harness .chain - .process_chain_segment(vec![]) + .process_chain_segment(vec![], CountUnrealized::True) + .await .into_block_error() .expect("should import empty chain segment"); harness .chain - .process_chain_segment(blocks.clone()) + .process_chain_segment(blocks.clone(), CountUnrealized::True) + .await .into_block_error() .expect("should import chain segment"); - harness.chain.fork_choice().expect("should run fork choice"); + harness.chain.recompute_head_at_current_slot().await; assert_eq!( - harness - .chain - .head_info() - .expect("should get harness b head") - .block_root, + harness.head_block_root(), blocks.last().unwrap().canonical_root(), "harness should have last block as head" ); } -#[test] -fn chain_segment_varying_chunk_size() { +#[tokio::test] +async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { let harness = get_harness(VALIDATOR_COUNT); - let blocks = chain_segment_blocks(); + let chain_segment = get_chain_segment().await; + let blocks = chain_segment_blocks(&chain_segment); harness .chain @@ -181,44 +184,44 @@ fn chain_segment_varying_chunk_size() { for chunk in blocks.chunks(*chunk_size) { harness .chain - .process_chain_segment(chunk.to_vec()) + .process_chain_segment(chunk.to_vec(), CountUnrealized::True) + .await .into_block_error() .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); } - harness.chain.fork_choice().expect("should run fork choice"); + harness.chain.recompute_head_at_current_slot().await; assert_eq!( - harness - .chain - .head_info() - .expect("should get harness b head") - .block_root, + harness.head_block_root(), blocks.last().unwrap().canonical_root(), "harness should have last block as head" ); } } -#[test] -fn chain_segment_non_linear_parent_roots() { +#[tokio::test] +async fn chain_segment_non_linear_parent_roots() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; + harness .chain .slot_clock - 
.set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); /* * Test with a block removed. */ - let mut blocks = chain_segment_blocks(); + let mut blocks = chain_segment_blocks(&chain_segment); blocks.remove(2); assert!( matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) + .await .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -228,16 +231,17 @@ fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. */ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) + .await .into_block_error(), Err(BlockError::NonLinearParentRoots) ), @@ -245,28 +249,30 @@ fn chain_segment_non_linear_parent_roots() { ); } -#[test] -fn chain_segment_non_linear_slots() { +#[tokio::test] +async fn chain_segment_non_linear_slots() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); /* * Test where a child is lower than the parent. 
*/ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.slot_mut() = Slot::new(0); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) + .await .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -277,16 +283,17 @@ fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. */ - let mut blocks = chain_segment_blocks(); - let (mut block, signature) = blocks[3].clone().deconstruct(); + let mut blocks = chain_segment_blocks(&chain_segment); + let (mut block, signature) = blocks[3].as_ref().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); - blocks[3] = SignedBeaconBlock::from_block(block, signature); + blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)); assert!( matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) + .await .into_block_error(), Err(BlockError::NonLinearSlots) ), @@ -294,7 +301,8 @@ fn chain_segment_non_linear_slots() { ); } -fn assert_invalid_signature( +async fn assert_invalid_signature( + chain_segment: &[BeaconSnapshot], harness: &BeaconChainHarness>, block_index: usize, snapshots: &[BeaconSnapshot], @@ -310,7 +318,8 @@ fn assert_invalid_signature( matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -318,24 +327,35 @@ fn assert_invalid_signature( item ); + // Call fork choice to update cached head (including finalization). 
+ harness.chain.recompute_head_at_current_slot().await; + // Ensure the block will be rejected if imported on its own (without gossip checking). - let ancestor_blocks = CHAIN_SEGMENT + let ancestor_blocks = chain_segment .iter() .take(block_index) .map(|snapshot| snapshot.beacon_block.clone()) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been // imported prior to this test. - let _ = harness.chain.process_chain_segment(ancestor_blocks); + let _ = harness + .chain + .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .await; + harness.chain.recompute_head_at_current_slot().await; + + let process_res = harness + .chain + .process_block( + snapshots[block_index].beacon_block.clone(), + CountUnrealized::True, + ) + .await; assert!( - matches!( - harness - .chain - .process_block(snapshots[block_index].beacon_block.clone()), - Err(BlockError::InvalidSignature) - ), - "should not import individual block with an invalid {} signature", - item + matches!(process_res, Err(BlockError::InvalidSignature)), + "should not import individual block with an invalid {} signature, got: {:?}", + item, + process_res ); // NOTE: we choose not to check gossip verification here. It only checks one signature @@ -346,39 +366,53 @@ fn assert_invalid_signature( // slot) tuple. 
} -fn get_invalid_sigs_harness() -> BeaconChainHarness> { +async fn get_invalid_sigs_harness( + chain_segment: &[BeaconSnapshot], +) -> BeaconChainHarness> { let harness = get_harness(VALIDATOR_COUNT); harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + .set_slot(chain_segment.last().unwrap().beacon_block.slot().as_u64()); harness } -#[test] -fn invalid_signature_gossip_block() { +#[tokio::test] +async fn invalid_signature_gossip_block() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Ensure the block will be rejected if imported on its own (without gossip checking). - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); - snapshots[block_index].beacon_block = - SignedBeaconBlock::from_block(block.clone(), junk_signature()); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (block, _) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); + snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block( + block.clone(), + junk_signature(), + )); // Import all the ancestors before the `block_index` block. 
- let ancestor_blocks = CHAIN_SEGMENT + let ancestor_blocks = chain_segment .iter() .take(block_index) .map(|snapshot| snapshot.beacon_block.clone()) .collect(); harness .chain - .process_chain_segment(ancestor_blocks) + .process_chain_segment(ancestor_blocks, CountUnrealized::True) + .await .into_block_error() .expect("should import all blocks prior to the one being tested"); assert!( matches!( harness .chain - .process_block(SignedBeaconBlock::from_block(block, junk_signature())), + .process_block( + Arc::new(SignedBeaconBlock::from_block(block, junk_signature())), + CountUnrealized::True + ) + .await, Err(BlockError::InvalidSignature) ), "should not import individual block with an invalid gossip signature", @@ -386,14 +420,21 @@ fn invalid_signature_gossip_block() { } } -#[test] -fn invalid_signature_block_proposal() { +#[tokio::test] +async fn invalid_signature_block_proposal() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (block, _) = snapshots[block_index].beacon_block.clone().deconstruct(); - snapshots[block_index].beacon_block = - SignedBeaconBlock::from_block(block.clone(), junk_signature()); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (block, _) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); + snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block( + block.clone(), + junk_signature(), + )); let blocks = snapshots .iter() .map(|snapshot| snapshot.beacon_block.clone()) @@ -403,7 +444,8 @@ fn invalid_signature_block_proposal() { matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -412,26 +454,37 @@ fn invalid_signature_block_proposal() { } } -#[test] -fn 
invalid_signature_randao_reveal() { +#[tokio::test] +async fn invalid_signature_randao_reveal() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); *block.body_mut().randao_reveal_mut() = junk_signature(); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "randao"); + assert_invalid_signature(&chain_segment, &harness, block_index, &snapshots, "randao").await; } } -#[test] -fn invalid_signature_proposer_slashing() { +#[tokio::test] +async fn invalid_signature_proposer_slashing() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); let proposer_slashing = ProposerSlashing { signed_header_1: SignedBeaconBlockHeader { message: block.block_header(), @@ -447,18 +500,27 @@ fn invalid_signature_proposer_slashing() { .proposer_slashings_mut() .push(proposer_slashing) .expect("should update proposer slashing"); - 
snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "proposer slashing"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "proposer slashing", + ) + .await; } } -#[test] -fn invalid_signature_attester_slashing() { +#[tokio::test] +async fn invalid_signature_attester_slashing() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let indexed_attestation = IndexedAttestation { attesting_indices: vec![0].into(), data: AttestationData { @@ -480,33 +542,58 @@ fn invalid_signature_attester_slashing() { attestation_1: indexed_attestation.clone(), attestation_2: indexed_attestation, }; - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .attester_slashings_mut() .push(attester_slashing) .expect("should update attester slashing"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "attester slashing"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "attester slashing", + ) + .await; } } -#[test] -fn invalid_signature_attestation() 
{ +#[tokio::test] +async fn invalid_signature_attestation() { + let chain_segment = get_chain_segment().await; let mut checked_attestation = false; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); if let Some(attestation) = block.body_mut().attestations_mut().get_mut(0) { attestation.signature = junk_aggregate_signature(); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "attestation"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "attestation", + ) + .await; checked_attestation = true; } } @@ -517,12 +604,13 @@ fn invalid_signature_attestation() { ) } -#[test] -fn invalid_signature_deposit() { +#[tokio::test] +async fn invalid_signature_deposit() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { // Note: an invalid deposit signature is permitted! 
- let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let deposit = Deposit { proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1].into(), data: DepositData { @@ -532,13 +620,18 @@ fn invalid_signature_deposit() { signature: junk_signature().into(), }, }; - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .deposits_mut() .push(deposit) .expect("should update deposit"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); let blocks = snapshots @@ -549,7 +642,8 @@ fn invalid_signature_deposit() { !matches!( harness .chain - .process_chain_segment(blocks) + .process_chain_segment(blocks, CountUnrealized::True) + .await .into_block_error(), Err(BlockError::InvalidSignature) ), @@ -558,13 +652,18 @@ fn invalid_signature_deposit() { } } -#[test] -fn invalid_signature_exit() { +#[tokio::test] +async fn invalid_signature_exit() { + let chain_segment = get_chain_segment().await; for &block_index in BLOCK_INDICES { - let harness = get_invalid_sigs_harness(); - let mut snapshots = CHAIN_SEGMENT.clone(); + let harness = get_invalid_sigs_harness(&chain_segment).await; + let mut snapshots = chain_segment.clone(); let epoch = snapshots[block_index].beacon_state.current_epoch(); - let (mut block, signature) = snapshots[block_index].beacon_block.clone().deconstruct(); + let (mut block, signature) = snapshots[block_index] + .beacon_block + .as_ref() + .clone() + .deconstruct(); block .body_mut() .voluntary_exits_mut() @@ -576,10 +675,18 @@ fn 
invalid_signature_exit() { signature: junk_signature(), }) .expect("should update deposit"); - snapshots[block_index].beacon_block = SignedBeaconBlock::from_block(block, signature); + snapshots[block_index].beacon_block = + Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots); update_proposal_signatures(&mut snapshots, &harness); - assert_invalid_signature(&harness, block_index, &snapshots, "voluntary exit"); + assert_invalid_signature( + &chain_segment, + &harness, + block_index, + &snapshots, + "voluntary exit", + ) + .await; } } @@ -590,30 +697,36 @@ fn unwrap_err(result: Result) -> E { } } -#[test] -fn block_gossip_verification() { +#[tokio::test] +async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); + let chain_segment = get_chain_segment().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; harness .chain .slot_clock - .set_slot(CHAIN_SEGMENT[block_index].beacon_block.slot().as_u64()); + .set_slot(chain_segment[block_index].beacon_block.slot().as_u64()); // Import the ancestors prior to the block we're testing. - for snapshot in &CHAIN_SEGMENT[0..block_index] { + for snapshot in &chain_segment[0..block_index] { let gossip_verified = harness .chain .verify_block_for_gossip(snapshot.beacon_block.clone()) + .await .expect("should obtain gossip verified block"); harness .chain - .process_block(gossip_verified) + .process_block(gossip_verified, CountUnrealized::True) + .await .expect("should import valid gossip verified block"); } + // Recompute the head to ensure we cache the latest view of fork choice. + harness.chain.recompute_head_at_current_slot().await; + /* * This test ensures that: * @@ -624,15 +737,16 @@ fn block_gossip_verification() { * future blocks for processing at the appropriate slot). 
*/ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let expected_block_slot = block.slot() + 1; *block.slot_mut() = expected_block_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::FutureSlot { present_slot, block_slot, @@ -654,21 +768,19 @@ fn block_gossip_verification() { * nodes, etc). */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let expected_finalized_slot = harness - .chain - .head_info() - .expect("should get head info") - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()); *block.slot_mut() = expected_finalized_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::WouldRevertFinalizedSlot { block_slot, finalized_slot, @@ -687,8 +799,9 @@ fn block_gossip_verification() { * proposer_index pubkey. */ - let block = CHAIN_SEGMENT[block_index] + let block = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct() .0; @@ -697,10 +810,11 @@ fn block_gossip_verification() { unwrap_err( harness .chain - .verify_block_for_gossip(SignedBeaconBlock::from_block( + .verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block( block, junk_signature() - )) + ))) + .await ), BlockError::ProposalSignatureInvalid ), @@ -715,15 +829,16 @@ fn block_gossip_verification() { * The block's parent (defined by block.parent_root) passes validation. 
*/ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); let parent_root = Hash256::from_low_u64_be(42); *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::ParentUnknown(block) if block.parent_root() == parent_root ), @@ -740,15 +855,16 @@ fn block_gossip_verification() { * store.finalized_checkpoint.root */ - let (mut block, signature) = CHAIN_SEGMENT[block_index] + let (mut block, signature) = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct(); - let parent_root = CHAIN_SEGMENT[0].beacon_block_root; + let parent_root = chain_segment[0].beacon_block_root; *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(SignedBeaconBlock::from_block(block, signature))), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::NotFinalizedDescendant { block_parent_root } if block_parent_root == parent_root ), @@ -766,8 +882,9 @@ fn block_gossip_verification() { * processing while proposers for the block's branch are calculated. 
*/ - let mut block = CHAIN_SEGMENT[block_index] + let mut block = chain_segment[block_index] .beacon_block + .as_ref() .clone() .deconstruct() .0; @@ -779,13 +896,13 @@ fn block_gossip_verification() { *block.proposer_index_mut() = other_proposer; let block = block.sign( &generate_deterministic_keypair(other_proposer as usize).sk, - &harness.chain.head_info().unwrap().fork, + &harness.chain.canonical_head.cached_head().head_fork(), harness.chain.genesis_validators_root, &harness.chain.spec, ); assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block.clone())), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::IncorrectBlockProposer { block, local_shuffling, @@ -797,7 +914,7 @@ fn block_gossip_verification() { // Check to ensure that we registered this is a valid block from this proposer. assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(block.clone())), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::RepeatProposal { proposer, slot, @@ -807,9 +924,9 @@ fn block_gossip_verification() { "should register any valid signature against the proposer, even if the block failed later verification" ); - let block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let block = chain_segment[block_index].beacon_block.clone(); assert!( - harness.chain.verify_block_for_gossip(block).is_ok(), + harness.chain.verify_block_for_gossip(block).await.is_ok(), "the valid block should be processed" ); @@ -822,12 +939,13 @@ fn block_gossip_verification() { * signed_beacon_block.message.slot. 
*/ - let block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let block = chain_segment[block_index].beacon_block.clone(); assert!( matches!( harness .chain .verify_block_for_gossip(block.clone()) + .await .err() .expect("should error when processing known block"), BlockError::RepeatProposal { @@ -840,8 +958,8 @@ fn block_gossip_verification() { ); } -#[test] -fn verify_block_for_gossip_slashing_detection() { +#[tokio::test] +async fn verify_block_for_gossip_slashing_detection() { let slasher_dir = tempdir().unwrap(); let slasher = Arc::new( Slasher::open(SlasherConfig::new(slasher_dir.path().into()), test_logger()).unwrap(), @@ -858,12 +976,25 @@ fn verify_block_for_gossip_slashing_detection() { harness.advance_slot(); let state = harness.get_current_state(); - let (block1, _) = harness.make_block(state.clone(), Slot::new(1)); - let (block2, _) = harness.make_block(state, Slot::new(1)); + let (block1, _) = harness.make_block(state.clone(), Slot::new(1)).await; + let (block2, _) = harness.make_block(state, Slot::new(1)).await; - let verified_block = harness.chain.verify_block_for_gossip(block1).unwrap(); - harness.chain.process_block(verified_block).unwrap(); - unwrap_err(harness.chain.verify_block_for_gossip(block2)); + let verified_block = harness + .chain + .verify_block_for_gossip(Arc::new(block1)) + .await + .unwrap(); + harness + .chain + .process_block(verified_block, CountUnrealized::True) + .await + .unwrap(); + unwrap_err( + harness + .chain + .verify_block_for_gossip(Arc::new(block2)) + .await, + ); // Slasher should have been handed the two conflicting blocks and crafted a slashing. 
slasher.process_queued(Epoch::new(0)).unwrap(); @@ -875,16 +1006,24 @@ fn verify_block_for_gossip_slashing_detection() { slasher_dir.close().unwrap(); } -#[test] -fn verify_block_for_gossip_doppelganger_detection() { +#[tokio::test] +async fn verify_block_for_gossip_doppelganger_detection() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - let (block, _) = harness.make_block(state.clone(), Slot::new(1)); + let (block, _) = harness.make_block(state.clone(), Slot::new(1)).await; - let verified_block = harness.chain.verify_block_for_gossip(block).unwrap(); + let verified_block = harness + .chain + .verify_block_for_gossip(Arc::new(block)) + .await + .unwrap(); let attestations = verified_block.block.message().body().attestations().clone(); - harness.chain.process_block(verified_block).unwrap(); + harness + .chain + .process_block(verified_block, CountUnrealized::True) + .await + .unwrap(); for att in attestations.iter() { let epoch = att.data.target.epoch; @@ -921,8 +1060,8 @@ fn verify_block_for_gossip_doppelganger_detection() { } } -#[test] -fn add_base_block_to_altair_chain() { +#[tokio::test] +async fn add_base_block_to_altair_chain() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); @@ -940,11 +1079,13 @@ fn add_base_block_to_altair_chain() { harness.advance_slot(); // Build out all the blocks in epoch 0. - harness.extend_chain( - slots_per_epoch as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + slots_per_epoch as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Move into the next empty slot. harness.advance_slot(); @@ -952,7 +1093,7 @@ fn add_base_block_to_altair_chain() { // Produce an Altair block. 
let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (altair_signed_block, _) = harness.make_block(state.clone(), slot); + let (altair_signed_block, _) = harness.make_block(state.clone(), slot).await; let altair_block = &altair_signed_block .as_altair() .expect("test expects an altair block") @@ -1008,7 +1149,8 @@ fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(base_block.clone()) + .verify_block_for_gossip(Arc::new(base_block.clone())) + .await .err() .expect("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { @@ -1021,7 +1163,8 @@ fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .process_block(base_block.clone()) + .process_block(Arc::new(base_block.clone()), CountUnrealized::True) + .await .err() .expect("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { @@ -1032,7 +1175,10 @@ fn add_base_block_to_altair_chain() { // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. assert!(matches!( - harness.chain.process_chain_segment(vec![base_block]), + harness + .chain + .process_chain_segment(vec![Arc::new(base_block)], CountUnrealized::True) + .await, ChainSegmentResult::Failed { imported_blocks: 0, error: BlockError::InconsistentFork(InconsistentFork { @@ -1043,8 +1189,8 @@ fn add_base_block_to_altair_chain() { )); } -#[test] -fn add_altair_block_to_base_chain() { +#[tokio::test] +async fn add_altair_block_to_base_chain() { let mut spec = MainnetEthSpec::default_spec(); // Altair never happens. @@ -1061,11 +1207,13 @@ fn add_altair_block_to_base_chain() { harness.advance_slot(); // Build one block. 
- harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Move into the next empty slot. harness.advance_slot(); @@ -1073,7 +1221,7 @@ fn add_altair_block_to_base_chain() { // Produce an altair block. let state = harness.get_current_state(); let slot = harness.get_current_slot(); - let (base_signed_block, _) = harness.make_block(state.clone(), slot); + let (base_signed_block, _) = harness.make_block(state.clone(), slot).await; let base_block = &base_signed_block .as_base() .expect("test expects a base block") @@ -1130,7 +1278,8 @@ fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(altair_block.clone()) + .verify_block_for_gossip(Arc::new(altair_block.clone())) + .await .err() .expect("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { @@ -1143,7 +1292,8 @@ fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .process_block(altair_block.clone()) + .process_block(Arc::new(altair_block.clone()), CountUnrealized::True) + .await .err() .expect("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { @@ -1154,7 +1304,10 @@ fn add_altair_block_to_base_chain() { // Ensure that it would be impossible to import via `BeaconChain::process_chain_segment`. 
assert!(matches!( - harness.chain.process_chain_segment(vec![altair_block]), + harness + .chain + .process_chain_segment(vec![Arc::new(altair_block)], CountUnrealized::True) + .await, ChainSegmentResult::Failed { imported_blocks: 0, error: BlockError::InconsistentFork(InconsistentFork { diff --git a/beacon_node/beacon_chain/tests/merge.rs b/beacon_node/beacon_chain/tests/merge.rs index d67ed35f9c..19e8902a3e 100644 --- a/beacon_node/beacon_chain/tests/merge.rs +++ b/beacon_node/beacon_chain/tests/merge.rs @@ -1,7 +1,7 @@ #![cfg(not(debug_assertions))] // Tests run too slow in debug. use beacon_chain::test_utils::BeaconChainHarness; -use execution_layer::test_utils::{generate_pow_block, DEFAULT_TERMINAL_BLOCK}; +use execution_layer::test_utils::{generate_pow_block, Block, DEFAULT_TERMINAL_BLOCK}; use types::*; const VALIDATOR_COUNT: usize = 32; @@ -22,16 +22,17 @@ fn verify_execution_payload_chain(chain: &[FullPayload]) { prev_ep.execution_payload.block_number + 1, ep.execution_payload.block_number ); + assert!(ep.execution_payload.timestamp > prev_ep.execution_payload.timestamp); } prev_ep = Some(ep.clone()); } } -#[test] +#[tokio::test] // TODO(merge): This isn't working cause the non-zero values in `initialize_beacon_state_from_eth1` // are causing failed lookups to the execution node. I need to come back to this. 
#[should_panic] -fn merge_with_terminal_block_hash_override() { +async fn merge_with_terminal_block_hash_override() { let altair_fork_epoch = Epoch::new(0); let bellatrix_fork_epoch = Epoch::new(0); @@ -70,8 +71,7 @@ fn merge_with_terminal_block_hash_override() { assert!( harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_block .as_merge() .is_ok(), @@ -80,9 +80,9 @@ fn merge_with_terminal_block_hash_override() { let mut execution_payloads = vec![]; for i in 0..E::slots_per_epoch() * 3 { - harness.extend_slots(1); + harness.extend_slots(1).await; - let block = harness.chain.head().unwrap().beacon_block; + let block = &harness.chain.head_snapshot().beacon_block; let execution_payload = block.message().body().execution_payload().unwrap().clone(); if i == 0 { @@ -94,8 +94,8 @@ fn merge_with_terminal_block_hash_override() { verify_execution_payload_chain(execution_payloads.as_slice()); } -#[test] -fn base_altair_merge_with_terminal_block_after_fork() { +#[tokio::test] +async fn base_altair_merge_with_terminal_block_after_fork() { let altair_fork_epoch = Epoch::new(4); let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch()); let bellatrix_fork_epoch = Epoch::new(8); @@ -118,15 +118,15 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Start with the base fork. */ - assert!(harness.chain.head().unwrap().beacon_block.as_base().is_ok()); + assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok()); /* * Do the Altair fork. */ - harness.extend_to_slot(altair_fork_slot); + harness.extend_to_slot(altair_fork_slot).await; - let altair_head = harness.chain.head().unwrap().beacon_block; + let altair_head = &harness.chain.head_snapshot().beacon_block; assert!(altair_head.as_altair().is_ok()); assert_eq!(altair_head.slot(), altair_fork_slot); @@ -134,9 +134,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Do the merge fork, without a terminal PoW block. 
*/ - harness.extend_to_slot(merge_fork_slot); + harness.extend_to_slot(merge_fork_slot).await; - let merge_head = harness.chain.head().unwrap().beacon_block; + let merge_head = &harness.chain.head_snapshot().beacon_block; assert!(merge_head.as_merge().is_ok()); assert_eq!(merge_head.slot(), merge_fork_slot); assert_eq!( @@ -148,9 +148,9 @@ fn base_altair_merge_with_terminal_block_after_fork() { * Next merge block shouldn't include an exec payload. */ - harness.extend_slots(1); + harness.extend_slots(1).await; - let one_after_merge_head = harness.chain.head().unwrap().beacon_block; + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; assert_eq!( *one_after_merge_head .message() @@ -170,14 +170,38 @@ fn base_altair_merge_with_terminal_block_after_fork() { .move_to_terminal_block() .unwrap(); + // Add a slot duration to get to the next slot + let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot; + + harness + .execution_block_generator() + .modify_last_block(|block| { + if let Block::PoW(terminal_block) = block { + terminal_block.timestamp = timestamp; + } + }); + + harness.extend_slots(1).await; + + let one_after_merge_head = &harness.chain.head_snapshot().beacon_block; + assert_eq!( + *one_after_merge_head + .message() + .body() + .execution_payload() + .unwrap(), + FullPayload::default() + ); + assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2); + /* * Next merge block should include an exec payload. 
*/ for _ in 0..4 { - harness.extend_slots(1); + harness.extend_slots(1).await; - let block = harness.chain.head().unwrap().beacon_block; + let block = &harness.chain.head_snapshot().beacon_block; execution_payloads.push(block.message().body().execution_payload().unwrap().clone()); } diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index c9df6aa31d..535fe080a7 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -46,18 +46,20 @@ fn get_harness(store: Arc, validator_count: usize) -> TestHarness { harness } -#[test] -fn voluntary_exit() { +#[tokio::test] +async fn voluntary_exit() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), VALIDATOR_COUNT); let spec = &harness.chain.spec.clone(); - harness.extend_chain( - (E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + (E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let validator_index1 = VALIDATOR_COUNT - 1; let validator_index2 = VALIDATOR_COUNT - 2; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 1aa9844a35..027a708cfa 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,17 +1,28 @@ #![cfg(not(debug_assertions))] +use beacon_chain::otb_verification_service::{ + load_optimistic_transition_blocks, validate_optimistic_transition_blocks, + OptimisticTransitionBlock, +}; use beacon_chain::{ + canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainError, BlockError, ExecutionPayloadError, 
HeadInfo, StateSkipConfig, - WhenSlotSkipped, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, + BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped, + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, + INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, + test_utils::ExecutionBlockGenerator, ExecutionLayer, ForkChoiceState, PayloadAttributes, }; -use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; +use fork_choice::{ + CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus, +}; use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use slot_clock::SlotClock; +use std::collections::HashMap; +use std::sync::Arc; use std::time::Duration; use task_executor::ShutdownReason; use tree_hash::TreeHash; @@ -29,7 +40,6 @@ enum Payload { }, Syncing, InvalidBlockHash, - InvalidTerminalBlock, } struct InvalidPayloadRig { @@ -39,7 +49,11 @@ struct InvalidPayloadRig { impl InvalidPayloadRig { fn new() -> Self { - let mut spec = E::default_spec(); + let spec = E::default_spec(); + Self::new_with_spec(spec) + } + + fn new_with_spec(mut spec: ChainSpec) -> Self { spec.altair_fork_epoch = Some(Epoch::new(0)); spec.bellatrix_fork_epoch = Some(Epoch::new(0)); @@ -64,7 +78,7 @@ impl InvalidPayloadRig { self } - fn execution_layer(&self) -> ExecutionLayer { + fn execution_layer(&self) -> ExecutionLayer { self.harness.chain.execution_layer.clone().unwrap() } @@ -84,19 +98,23 @@ impl InvalidPayloadRig { fn execution_status(&self, block_root: Hash256) -> ExecutionStatus { self.harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .get_block(&block_root) .unwrap() .execution_status } - fn fork_choice(&self) { - self.harness.chain.fork_choice().unwrap(); + async fn recompute_head(&self) { + self.harness.chain.recompute_head_at_current_slot().await; } - fn 
head_info(&self) -> HeadInfo { - self.harness.chain.head_info().unwrap() + fn cached_head(&self) -> CachedHead { + self.harness.chain.canonical_head.cached_head() + } + + fn canonical_head(&self) -> &CanonicalHead> { + &self.harness.chain.canonical_head } fn previous_forkchoice_update_params(&self) -> (ForkChoiceState, PayloadAttributes) { @@ -142,23 +160,25 @@ impl InvalidPayloadRig { .block_hash } - fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { - (0..num_blocks) - .map(|_| self.import_block(is_valid.clone())) - .collect() + async fn build_blocks(&mut self, num_blocks: u64, is_valid: Payload) -> Vec { + let mut roots = Vec::with_capacity(num_blocks as usize); + for _ in 0..num_blocks { + roots.push(self.import_block(is_valid.clone()).await); + } + roots } - fn move_to_first_justification(&mut self, is_valid: Payload) { + async fn move_to_first_justification(&mut self, is_valid: Payload) { let slots_till_justification = E::slots_per_epoch() * 3; - self.build_blocks(slots_till_justification, is_valid); + self.build_blocks(slots_till_justification, is_valid).await; - let justified_checkpoint = self.head_info().current_justified_checkpoint; + let justified_checkpoint = self.harness.justified_checkpoint(); assert_eq!(justified_checkpoint.epoch, 2); } /// Import a block while setting the newPayload and forkchoiceUpdated responses to `is_valid`. 
- fn import_block(&mut self, is_valid: Payload) -> Hash256 { - self.import_block_parametric(is_valid, is_valid, |error| { + async fn import_block(&mut self, is_valid: Payload) -> Hash256 { + self.import_block_parametric(is_valid, is_valid, None, |error| { matches!( error, BlockError::ExecutionPayloadError( @@ -166,6 +186,7 @@ impl InvalidPayloadRig { ) ) }) + .await } fn block_root_at_slot(&self, slot: Slot) -> Option { @@ -178,24 +199,25 @@ impl InvalidPayloadRig { fn validate_manually(&self, block_root: Hash256) { self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_valid_execution_payload(block_root) .unwrap(); } - fn import_block_parametric) -> bool>( + async fn import_block_parametric) -> bool>( &mut self, new_payload_response: Payload, forkchoice_response: Payload, + slot_override: Option, evaluate_error: F, ) -> Hash256 { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); - let head = self.harness.chain.head().unwrap(); - let state = head.beacon_state; - let slot = state.slot() + 1; - let (block, post_state) = self.harness.make_block(state, slot); + let head = self.harness.chain.head_snapshot(); + let state = head.beacon_state.clone_with_only_committee_caches(); + let slot = slot_override.unwrap_or(state.slot() + 1); + let (block, post_state) = self.harness.make_block(state, slot).await; let block_root = block.canonical_root(); let set_new_payload = |payload: Payload| match payload { @@ -208,16 +230,20 @@ impl InvalidPayloadRig { Payload::Invalid { latest_valid_hash } => { let latest_valid_hash = latest_valid_hash .unwrap_or_else(|| self.block_hash(block.message().parent_root())); - mock_execution_layer - .server - .all_payloads_invalid_on_new_payload(latest_valid_hash) + if latest_valid_hash == ExecutionBlockHash::zero() { + mock_execution_layer + .server + .all_payloads_invalid_terminal_block_on_new_payload() + } else { + mock_execution_layer + .server + 
.all_payloads_invalid_on_new_payload(latest_valid_hash) + } } + Payload::InvalidBlockHash => mock_execution_layer .server .all_payloads_invalid_block_hash_on_new_payload(), - Payload::InvalidTerminalBlock => mock_execution_layer - .server - .all_payloads_invalid_terminal_block_on_new_payload(), }; let set_forkchoice_updated = |payload: Payload| match payload { Payload::Valid => mock_execution_layer @@ -229,16 +255,20 @@ impl InvalidPayloadRig { Payload::Invalid { latest_valid_hash } => { let latest_valid_hash = latest_valid_hash .unwrap_or_else(|| self.block_hash(block.message().parent_root())); - mock_execution_layer - .server - .all_payloads_invalid_on_forkchoice_updated(latest_valid_hash) + if latest_valid_hash == ExecutionBlockHash::zero() { + mock_execution_layer + .server + .all_payloads_invalid_terminal_block_on_forkchoice_updated() + } else { + mock_execution_layer + .server + .all_payloads_invalid_on_forkchoice_updated(latest_valid_hash) + } } + Payload::InvalidBlockHash => mock_execution_layer .server .all_payloads_invalid_block_hash_on_forkchoice_updated(), - Payload::InvalidTerminalBlock => mock_execution_layer - .server - .all_payloads_invalid_terminal_block_on_forkchoice_updated(), }; match (new_payload_response, forkchoice_response) { @@ -249,7 +279,11 @@ impl InvalidPayloadRig { } else { mock_execution_layer.server.full_payload_verification(); } - let root = self.harness.process_block(slot, block.clone()).unwrap(); + let root = self + .harness + .process_block(slot, block.clone()) + .await + .unwrap(); if self.enable_attestations { let all_validators: Vec = (0..VALIDATOR_COUNT).collect(); @@ -265,11 +299,9 @@ impl InvalidPayloadRig { let execution_status = self.execution_status(root.into()); match forkchoice_response { - Payload::Syncing => assert!(execution_status.is_optimistic()), + Payload::Syncing => assert!(execution_status.is_strictly_optimistic()), Payload::Valid => assert!(execution_status.is_valid_and_post_bellatrix()), - Payload::Invalid { 
.. } - | Payload::InvalidBlockHash - | Payload::InvalidTerminalBlock => unreachable!(), + Payload::Invalid { .. } | Payload::InvalidBlockHash => unreachable!(), } assert_eq!( @@ -283,18 +315,12 @@ impl InvalidPayloadRig { "block from db must match block imported" ); } - ( - Payload::Invalid { .. } | Payload::InvalidBlockHash | Payload::InvalidTerminalBlock, - _, - ) - | ( - _, - Payload::Invalid { .. } | Payload::InvalidBlockHash | Payload::InvalidTerminalBlock, - ) => { + (Payload::Invalid { .. } | Payload::InvalidBlockHash, _) + | (_, Payload::Invalid { .. } | Payload::InvalidBlockHash) => { set_new_payload(new_payload_response); set_forkchoice_updated(forkchoice_response); - match self.harness.process_block(slot, block) { + match self.harness.process_block(slot, block).await { Err(error) if evaluate_error(&error) => (), Err(other) => { panic!("evaluate_error returned false with {:?}", other) @@ -309,8 +335,12 @@ impl InvalidPayloadRig { } }; - let block_in_forkchoice = - self.harness.chain.fork_choice.read().get_block(&block_root); + let block_in_forkchoice = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_block(&block_root); if let Payload::Invalid { .. } = new_payload_response { // A block found to be immediately invalid should not end up in fork choice. 
assert_eq!(block_in_forkchoice, None); @@ -333,106 +363,130 @@ impl InvalidPayloadRig { block_root } - fn invalidate_manually(&self, block_root: Hash256) { + async fn invalidate_manually(&self, block_root: Hash256) { self.harness .chain .process_invalid_execution_payload(&InvalidationOperation::InvalidateOne { block_root }) + .await .unwrap(); } + + fn assert_get_head_error_contains(&self, s: &str) { + match self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .get_head(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) + { + Err(ForkChoiceError::ProtoArrayError(e)) if e.contains(s) => (), + other => panic!("expected {} error, got {:?}", s, other), + }; + } } /// Simple test of the different import types. -#[test] -fn valid_invalid_syncing() { +#[tokio::test] +async fn valid_invalid_syncing() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); + rig.import_block(Payload::Valid).await; rig.import_block(Payload::Invalid { latest_valid_hash: None, - }); - rig.import_block(Payload::Syncing); + }) + .await; + rig.import_block(Payload::Syncing).await; } /// Ensure that an invalid payload can invalidate its parent too (given the right /// `latest_valid_hash`. -#[test] -fn invalid_payload_invalidates_parent() { +#[tokio::test] +async fn invalid_payload_invalidates_parent() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. 
+ rig.move_to_first_justification(Payload::Syncing).await; let roots = vec![ - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, ]; let latest_valid_hash = rig.block_hash(roots[0]); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; - assert!(rig.execution_status(roots[0]).is_valid_and_post_bellatrix()); + assert!(rig.execution_status(roots[0]).is_strictly_optimistic()); assert!(rig.execution_status(roots[1]).is_invalid()); assert!(rig.execution_status(roots[2]).is_invalid()); - assert_eq!(rig.head_info().block_root, roots[0]); + assert_eq!(rig.harness.head_block_root(), roots[0]); } /// Test invalidation of a payload via the fork choice updated message. /// /// The `invalid_payload` argument determines the type of invalid payload: `Invalid`, /// `InvalidBlockHash`, etc, taking the `latest_valid_hash` as an argument. -fn immediate_forkchoice_update_invalid_test( +async fn immediate_forkchoice_update_invalid_test( invalid_payload: impl FnOnce(Option) -> Payload, ) { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + rig.move_to_first_justification(Payload::Syncing).await; - let valid_head_root = rig.import_block(Payload::Valid); + let valid_head_root = rig.import_block(Payload::Valid).await; let latest_valid_hash = Some(rig.block_hash(valid_head_root)); // Import a block which returns syncing when supplied via newPayload, and then // invalid when the forkchoice update is sent. 
- rig.import_block_parametric(Payload::Syncing, invalid_payload(latest_valid_hash), |_| { - false - }); + rig.import_block_parametric( + Payload::Syncing, + invalid_payload(latest_valid_hash), + None, + |_| false, + ) + .await; // The head should be the latest valid block. - assert_eq!(rig.head_info().block_root, valid_head_root); + assert_eq!(rig.harness.head_block_root(), valid_head_root); } -#[test] -fn immediate_forkchoice_update_payload_invalid() { +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid() { immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid { latest_valid_hash, }) + .await } -#[test] -fn immediate_forkchoice_update_payload_invalid_block_hash() { - immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash) +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid_block_hash() { + immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash).await } -#[test] -fn immediate_forkchoice_update_payload_invalid_terminal_block() { - immediate_forkchoice_update_invalid_test(|_| Payload::InvalidTerminalBlock) +#[tokio::test] +async fn immediate_forkchoice_update_payload_invalid_terminal_block() { + immediate_forkchoice_update_invalid_test(|_| Payload::Invalid { + latest_valid_hash: Some(ExecutionBlockHash::zero()), + }) + .await } /// Ensure the client tries to exit when the justified checkpoint is invalidated. -#[test] -fn justified_checkpoint_becomes_invalid() { +#[tokio::test] +async fn justified_checkpoint_becomes_invalid() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - rig.move_to_first_justification(Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. 
+ rig.move_to_first_justification(Payload::Syncing).await; - let justified_checkpoint = rig.head_info().current_justified_checkpoint; + let justified_checkpoint = rig.harness.justified_checkpoint(); let parent_root_of_justified = rig .harness .chain @@ -449,14 +503,15 @@ fn justified_checkpoint_becomes_invalid() { let is_valid = Payload::Invalid { latest_valid_hash: Some(parent_hash_of_justified), }; - rig.import_block_parametric(is_valid, is_valid, |error| { + rig.import_block_parametric(is_valid, is_valid, None, |error| { matches!( error, // The block import should fail since the beacon chain knows the justified payload // is invalid. BlockError::BeaconChainError(BeaconChainError::JustifiedPayloadInvalid { .. }) ) - }); + }) + .await; // The beacon chain should have triggered a shutdown. assert_eq!( @@ -468,18 +523,18 @@ fn justified_checkpoint_becomes_invalid() { } /// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block. -#[test] -fn pre_finalized_latest_valid_hash() { +#[tokio::test] +async fn pre_finalized_latest_valid_hash() { let num_blocks = E::slots_per_epoch() * 4; let finalized_epoch = 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. + blocks.extend(rig.build_blocks(num_blocks - 1, Payload::Syncing).await); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); let pre_finalized_block_root = rig.block_root_at_slot(Slot::new(1)).unwrap(); let pre_finalized_block_hash = rig.block_hash(pre_finalized_block_root); @@ -490,10 +545,11 @@ fn pre_finalized_latest_valid_hash() { // Import a pre-finalized block. 
rig.import_block(Payload::Invalid { latest_valid_hash: Some(pre_finalized_block_hash), - }); + }) + .await; // The latest imported block should be the head. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // The beacon chain should *not* have triggered a shutdown. assert_eq!(rig.harness.shutdown_reasons(), vec![]); @@ -505,7 +561,7 @@ fn pre_finalized_latest_valid_hash() { if slot == 1 { assert!(rig.execution_status(root).is_valid_and_post_bellatrix()); } else { - assert!(rig.execution_status(root).is_optimistic()); + assert!(rig.execution_status(root).is_strictly_optimistic()); } } } @@ -513,17 +569,17 @@ fn pre_finalized_latest_valid_hash() { /// Ensure that a `latest_valid_hash` will: /// /// - Invalidate descendants of `latest_valid_root`. -/// - Validate `latest_valid_root` and its ancestors. -#[test] -fn latest_valid_hash_will_validate() { +/// - Will not validate `latest_valid_root` and its ancestors. +#[tokio::test] +async fn latest_valid_hash_will_not_validate() { const LATEST_VALID_SLOT: u64 = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(4, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. 
+ blocks.extend(rig.build_blocks(4, Payload::Syncing).await); let latest_valid_root = rig .block_root_at_slot(Slot::new(LATEST_VALID_SLOT)) @@ -532,9 +588,10 @@ fn latest_valid_hash_will_validate() { rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; - assert_eq!(rig.head_info().slot, LATEST_VALID_SLOT); + assert_eq!(rig.harness.head_slot(), LATEST_VALID_SLOT); for slot in 0..=5 { let slot = Slot::new(slot); @@ -551,25 +608,27 @@ fn latest_valid_hash_will_validate() { assert!(execution_status.is_invalid()) } else if slot == 0 { assert!(execution_status.is_irrelevant()) - } else { + } else if slot == 1 { assert!(execution_status.is_valid_and_post_bellatrix()) + } else { + assert!(execution_status.is_strictly_optimistic()) } } } /// Check behaviour when the `latest_valid_hash` is a junk value. -#[test] -fn latest_valid_hash_is_junk() { +#[tokio::test] +async fn latest_valid_hash_is_junk() { let num_blocks = E::slots_per_epoch() * 5; let finalized_epoch = 3; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); let mut blocks = vec![]; - blocks.push(rig.import_block(Payload::Valid)); // Import a valid transition block. - blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing)); + blocks.push(rig.import_block(Payload::Valid).await); // Import a valid transition block. + blocks.extend(rig.build_blocks(num_blocks, Payload::Syncing).await); - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); // No service should have triggered a shutdown, yet. assert!(rig.harness.shutdown_reasons().is_empty()); @@ -577,10 +636,11 @@ fn latest_valid_hash_is_junk() { let junk_hash = ExecutionBlockHash::repeat_byte(42); rig.import_block(Payload::Invalid { latest_valid_hash: Some(junk_hash), - }); + }) + .await; // The latest imported block should be the head. 
- assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // The beacon chain should *not* have triggered a shutdown. assert_eq!(rig.harness.shutdown_reasons(), vec![]); @@ -592,25 +652,25 @@ fn latest_valid_hash_is_junk() { if slot == 1 { assert!(rig.execution_status(root).is_valid_and_post_bellatrix()); } else { - assert!(rig.execution_status(root).is_optimistic()); + assert!(rig.execution_status(root).is_strictly_optimistic()); } } } /// Check that descendants of invalid blocks are also invalidated. -#[test] -fn invalidates_all_descendants() { +#[tokio::test] +async fn invalidates_all_descendants() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // Apply a block which conflicts with the canonical chain. 
let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); @@ -621,9 +681,14 @@ fn invalidates_all_descendants() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); - let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); - rig.fork_choice(); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let fork_block_root = rig + .harness + .chain + .process_block(Arc::new(fork_block), CountUnrealized::True) + .await + .unwrap(); + rig.recompute_head().await; // The latest valid hash will be set to the grandparent of the fork block. This means that the // parent of the fork block will become invalid. @@ -638,14 +703,15 @@ fn invalidates_all_descendants() { let latest_valid_hash = rig.block_hash(latest_valid_root); // The new block should not become the head, the old head should remain. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; // The block before the fork should become the head. - assert_eq!(rig.head_info().block_root, latest_valid_root); + assert_eq!(rig.harness.head_block_root(), latest_valid_root); // The fork block should be invalidated, even though it's not an ancestor of the block that // triggered the INVALID response from the EL. @@ -666,9 +732,15 @@ fn invalidates_all_descendants() { } let execution_status = rig.execution_status(root); - if slot <= latest_valid_slot { - // Blocks prior to the latest valid hash are valid. + if slot == 0 { + // Genesis block is pre-bellatrix. + assert!(execution_status.is_irrelevant()); + } else if slot == 1 { + // First slot was imported as valid. 
assert!(execution_status.is_valid_and_post_bellatrix()); + } else if slot <= latest_valid_slot { + // Blocks prior to and included the latest valid hash are not marked as valid. + assert!(execution_status.is_strictly_optimistic()); } else { // Blocks after the latest valid hash are invalid. assert!(execution_status.is_invalid()); @@ -677,19 +749,19 @@ fn invalidates_all_descendants() { } /// Check that the head will switch after the canonical branch is invalidated. -#[test] -fn switches_heads() { +#[tokio::test] +async fn switches_heads() { let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; let finalized_epoch = 2; let finalized_slot = E::slots_per_epoch() * 2; let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. - let blocks = rig.build_blocks(num_blocks, Payload::Syncing); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + let blocks = rig.build_blocks(num_blocks, Payload::Syncing).await; - assert_eq!(rig.head_info().finalized_checkpoint.epoch, finalized_epoch); - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.finalized_checkpoint().epoch, finalized_epoch); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); // Apply a block which conflicts with the canonical chain. 
let fork_slot = Slot::new(4 * E::slots_per_epoch() + 3); @@ -700,26 +772,34 @@ fn switches_heads() { .state_at_slot(fork_parent_slot, StateSkipConfig::WithStateRoots) .unwrap(); assert_eq!(fork_parent_state.slot(), fork_parent_slot); - let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot); + let (fork_block, _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); - let fork_block_root = rig.harness.chain.process_block(fork_block).unwrap(); - rig.fork_choice(); + let fork_block_root = rig + .harness + .chain + .process_block(Arc::new(fork_block), CountUnrealized::True) + .await + .unwrap(); + rig.recompute_head().await; let latest_valid_slot = fork_parent_slot; let latest_valid_hash = rig.block_hash(fork_parent_root); // The new block should not become the head, the old head should remain. - assert_eq!(rig.head_info().block_root, *blocks.last().unwrap()); + assert_eq!(rig.harness.head_block_root(), *blocks.last().unwrap()); rig.import_block(Payload::Invalid { latest_valid_hash: Some(latest_valid_hash), - }); + }) + .await; // The fork block should become the head. - assert_eq!(rig.head_info().block_root, fork_block_root); + assert_eq!(rig.harness.head_block_root(), fork_block_root); // The fork block has not yet been validated. - assert!(rig.execution_status(fork_block_root).is_optimistic()); + assert!(rig + .execution_status(fork_block_root) + .is_strictly_optimistic()); for root in blocks { let slot = rig @@ -736,9 +816,15 @@ fn switches_heads() { } let execution_status = rig.execution_status(root); - if slot <= latest_valid_slot { - // Blocks prior to the latest valid hash are valid. + if slot == 0 { + // Genesis block is pre-bellatrix. + assert!(execution_status.is_irrelevant()); + } else if slot == 1 { + // First slot was imported as valid. 
assert!(execution_status.is_valid_and_post_bellatrix()); + } else if slot <= latest_valid_slot { + // Blocks prior to and included the latest valid hash are not marked as valid. + assert!(execution_status.is_strictly_optimistic()); } else { // Blocks after the latest valid hash are invalid. assert!(execution_status.is_invalid()); @@ -746,17 +832,18 @@ fn switches_heads() { } } -#[test] -fn invalid_during_processing() { +#[tokio::test] +async fn invalid_during_processing() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); let roots = &[ - rig.import_block(Payload::Valid), + rig.import_block(Payload::Valid).await, rig.import_block(Payload::Invalid { latest_valid_hash: None, - }), - rig.import_block(Payload::Valid), + }) + .await, + rig.import_block(Payload::Valid).await, ]; // 0 should be present in the chain. @@ -772,20 +859,20 @@ fn invalid_during_processing() { None ); // 2 should be the head. - let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[2]); + let head_block_root = rig.harness.head_block_root(); + assert_eq!(head_block_root, roots[2]); } -#[test] -fn invalid_after_optimistic_sync() { +#[tokio::test] +async fn invalid_after_optimistic_sync() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. let mut roots = vec![ - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), - rig.import_block(Payload::Syncing), + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, + rig.import_block(Payload::Syncing).await, ]; for root in &roots { @@ -793,32 +880,35 @@ fn invalid_after_optimistic_sync() { } // 2 should be the head. 
- let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[2]); + let head = rig.harness.head_block_root(); + assert_eq!(head, roots[2]); - roots.push(rig.import_block(Payload::Invalid { - latest_valid_hash: Some(rig.block_hash(roots[1])), - })); + roots.push( + rig.import_block(Payload::Invalid { + latest_valid_hash: Some(rig.block_hash(roots[1])), + }) + .await, + ); // Running fork choice is necessary since a block has been invalidated. - rig.fork_choice(); + rig.recompute_head().await; // 1 should be the head, since 2 was invalidated. - let head = rig.harness.chain.head_info().unwrap(); - assert_eq!(head.block_root, roots[1]); + let head = rig.harness.head_block_root(); + assert_eq!(head, roots[1]); } -#[test] -fn manually_validate_child() { +#[tokio::test] +async fn manually_validate_child() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. - let parent = rig.import_block(Payload::Syncing); - let child = rig.import_block(Payload::Syncing); + let parent = rig.import_block(Payload::Syncing).await; + let child = rig.import_block(Payload::Syncing).await; - assert!(rig.execution_status(parent).is_optimistic()); - assert!(rig.execution_status(child).is_optimistic()); + assert!(rig.execution_status(parent).is_strictly_optimistic()); + assert!(rig.execution_status(child).is_strictly_optimistic()); rig.validate_manually(child); @@ -826,32 +916,32 @@ fn manually_validate_child() { assert!(rig.execution_status(child).is_valid_and_post_bellatrix()); } -#[test] -fn manually_validate_parent() { +#[tokio::test] +async fn manually_validate_parent() { let mut rig = InvalidPayloadRig::new().enable_attestations(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. 
+ rig.import_block(Payload::Valid).await; // Import a valid transition block. - let parent = rig.import_block(Payload::Syncing); - let child = rig.import_block(Payload::Syncing); + let parent = rig.import_block(Payload::Syncing).await; + let child = rig.import_block(Payload::Syncing).await; - assert!(rig.execution_status(parent).is_optimistic()); - assert!(rig.execution_status(child).is_optimistic()); + assert!(rig.execution_status(parent).is_strictly_optimistic()); + assert!(rig.execution_status(child).is_strictly_optimistic()); rig.validate_manually(parent); assert!(rig.execution_status(parent).is_valid_and_post_bellatrix()); - assert!(rig.execution_status(child).is_optimistic()); + assert!(rig.execution_status(child).is_strictly_optimistic()); } -#[test] -fn payload_preparation() { +#[tokio::test] +async fn payload_preparation() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); + rig.import_block(Payload::Valid).await; let el = rig.execution_layer(); - let head = rig.harness.chain.head().unwrap(); + let head = rig.harness.chain.head_snapshot(); let current_slot = rig.harness.chain.slot().unwrap(); assert_eq!(head.beacon_state.slot(), 1); assert_eq!(current_slot, 1); @@ -865,18 +955,19 @@ fn payload_preparation() { let fee_recipient = Address::repeat_byte(99); // Provide preparation data to the EL for `proposer`. 
- el.update_proposer_preparation_blocking( + el.update_proposer_preparation( Epoch::new(1), &[ProposerPreparationData { validator_index: proposer as u64, fee_recipient, }], ) - .unwrap(); + .await; rig.harness .chain - .prepare_beacon_proposer_blocking() + .prepare_beacon_proposer(rig.harness.chain.slot().unwrap()) + .await .unwrap(); let payload_attributes = PayloadAttributes { @@ -896,15 +987,15 @@ fn payload_preparation() { assert_eq!(rig.previous_payload_attributes(), payload_attributes); } -#[test] -fn invalid_parent() { +#[tokio::test] +async fn invalid_parent() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. // Import a syncing block atop the transition block (we'll call this the "parent block" since we // build another block on it later). - let parent_root = rig.import_block(Payload::Syncing); + let parent_root = rig.import_block(Payload::Syncing).await; let parent_block = rig.harness.get_block(parent_root.into()).unwrap(); let parent_state = rig .harness @@ -914,39 +1005,40 @@ fn invalid_parent() { // Produce another block atop the parent, but don't import yet. let slot = parent_block.slot() + 1; rig.harness.set_current_slot(slot); - let (block, state) = rig.harness.make_block(parent_state, slot); + let (block, state) = rig.harness.make_block(parent_state, slot).await; + let block = Arc::new(block); let block_root = block.canonical_root(); assert_eq!(block.parent_root(), parent_root); // Invalidate the parent block. - rig.invalidate_manually(parent_root); + rig.invalidate_manually(parent_root).await; assert!(rig.execution_status(parent_root).is_invalid()); // Ensure the block built atop an invalid payload is invalid for gossip. 
assert!(matches!( - rig.harness.chain.verify_block_for_gossip(block.clone()), + rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); // Ensure the block built atop an invalid payload is invalid for import. assert!(matches!( - rig.harness.chain.process_block(block.clone()), + rig.harness.chain.process_block(block.clone(), CountUnrealized::True).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); // Ensure the block built atop an invalid payload cannot be imported to fork choice. - let (block, _block_signature) = block.deconstruct(); assert!(matches!( - rig.harness.chain.fork_choice.write().on_block( + rig.harness.chain.canonical_head.fork_choice_write_lock().on_block( slot, - &block, + block.message(), block_root, Duration::from_secs(0), &state, PayloadVerificationStatus::Optimistic, - &rig.harness.chain.spec + &rig.harness.chain.spec, + CountUnrealized::True, ), Err(ForkChoiceError::ProtoArrayError(message)) if message.contains(&format!( @@ -960,21 +1052,26 @@ fn invalid_parent() { } /// Tests to ensure that we will still send a proposer preparation -#[test] -fn payload_preparation_before_transition_block() { +#[tokio::test] +async fn payload_preparation_before_transition_block() { let rig = InvalidPayloadRig::new(); let el = rig.execution_layer(); - let head = rig.harness.chain.head().unwrap(); - let head_info = rig.head_info(); - assert!( - !head_info.is_merge_transition_complete, - "the head block is pre-transition" - ); + // Run the watchdog routine so that the status of the execution engine is set. This ensures + // that we don't end up with `eth_syncing` requests later in this function that will impede + // testing. 
+ el.watchdog_task().await; + + let head = rig.harness.chain.head_snapshot(); assert_eq!( - head_info.execution_payload_block_hash, - Some(ExecutionBlockHash::zero()), - "the head block is post-bellatrix" + head.beacon_block + .message() + .body() + .execution_payload() + .unwrap() + .block_hash(), + ExecutionBlockHash::zero(), + "the head block is post-bellatrix but pre-transition" ); let current_slot = rig.harness.chain.slot().unwrap(); @@ -986,24 +1083,32 @@ fn payload_preparation_before_transition_block() { let fee_recipient = Address::repeat_byte(99); // Provide preparation data to the EL for `proposer`. - el.update_proposer_preparation_blocking( + el.update_proposer_preparation( Epoch::new(0), &[ProposerPreparationData { validator_index: proposer as u64, fee_recipient, }], ) - .unwrap(); + .await; rig.move_to_terminal_block(); rig.harness .chain - .prepare_beacon_proposer_blocking() + .prepare_beacon_proposer(current_slot) + .await .unwrap(); + let forkchoice_update_params = rig + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_forkchoice_update_parameters(); rig.harness .chain - .update_execution_engine_forkchoice_blocking(current_slot) + .update_execution_engine_forkchoice(current_slot, forkchoice_update_params) + .await .unwrap(); let (fork_choice_state, payload_attributes) = rig.previous_forkchoice_update_params(); @@ -1012,22 +1117,22 @@ fn payload_preparation_before_transition_block() { assert_eq!(fork_choice_state.head_block_hash, latest_block_hash); } -#[test] -fn attesting_to_optimistic_head() { +#[tokio::test] +async fn attesting_to_optimistic_head() { let mut rig = InvalidPayloadRig::new(); rig.move_to_terminal_block(); - rig.import_block(Payload::Valid); // Import a valid transition block. + rig.import_block(Payload::Valid).await; // Import a valid transition block. 
- let root = rig.import_block(Payload::Syncing); + let root = rig.import_block(Payload::Syncing).await; - let head = rig.harness.chain.head().unwrap(); + let head = rig.harness.chain.head_snapshot(); let slot = head.beacon_block.slot(); assert_eq!( head.beacon_block_root, root, "the head should be the latest imported block" ); assert!( - rig.execution_status(root).is_optimistic(), + rig.execution_status(root).is_strictly_optimistic(), "the head should be optimistic" ); @@ -1115,3 +1220,776 @@ fn attesting_to_optimistic_head() { get_aggregated().unwrap(); get_aggregated_by_slot_and_root().unwrap(); } + +/// A helper struct to build out a chain of some configurable length which undergoes the merge +/// transition. +struct OptimisticTransitionSetup { + blocks: Vec>>, + execution_block_generator: ExecutionBlockGenerator, +} + +impl OptimisticTransitionSetup { + async fn new(num_blocks: usize, ttd: u64) -> Self { + let mut spec = E::default_spec(); + spec.terminal_total_difficulty = ttd.into(); + let mut rig = InvalidPayloadRig::new_with_spec(spec).enable_attestations(); + rig.move_to_terminal_block(); + + let mut blocks = Vec::with_capacity(num_blocks); + for _ in 0..num_blocks { + let root = rig.import_block(Payload::Valid).await; + let block = rig.harness.chain.get_block(&root).await.unwrap().unwrap(); + blocks.push(Arc::new(block)); + } + + let execution_block_generator = rig + .harness + .mock_execution_layer + .as_ref() + .unwrap() + .server + .execution_block_generator() + .clone(); + + Self { + blocks, + execution_block_generator, + } + } +} + +/// Build a chain which has optimistically imported a transition block. +/// +/// The initial chain will be built with respect to `block_ttd`, whilst the `rig` which imports the +/// chain will operate with respect to `rig_ttd`. This allows for testing mismatched TTDs. 
+async fn build_optimistic_chain( + block_ttd: u64, + rig_ttd: u64, + num_blocks: usize, +) -> InvalidPayloadRig { + let OptimisticTransitionSetup { + blocks, + execution_block_generator, + } = OptimisticTransitionSetup::new(num_blocks, block_ttd).await; + // Build a brand-new testing harness. We will apply the blocks from the previous harness to + // this one. + let mut spec = E::default_spec(); + spec.terminal_total_difficulty = rig_ttd.into(); + let rig = InvalidPayloadRig::new_with_spec(spec); + + let spec = &rig.harness.chain.spec; + let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); + + // Ensure all the execution blocks from the first rig are available in the second rig. + *mock_execution_layer.server.execution_block_generator() = execution_block_generator; + + // Make the execution layer respond `SYNCING` to all `newPayload` requests. + mock_execution_layer + .server + .all_payloads_syncing_on_new_payload(true); + // Make the execution layer respond `SYNCING` to all `forkchoiceUpdated` requests. + mock_execution_layer + .server + .all_payloads_syncing_on_forkchoice_updated(); + // Make the execution layer respond `None` to all `getBlockByHash` requests. + mock_execution_layer + .server + .all_get_block_by_hash_requests_return_none(); + + let current_slot = std::cmp::max( + blocks[0].slot() + spec.safe_slots_to_import_optimistically, + num_blocks.into(), + ); + rig.harness.set_current_slot(current_slot); + + for block in blocks { + rig.harness + .chain + .process_block(block, CountUnrealized::True) + .await + .unwrap(); + } + + rig.harness.chain.recompute_head_at_current_slot().await; + + // Make the execution layer respond normally to `getBlockByHash` requests. + mock_execution_layer + .server + .all_get_block_by_hash_requests_return_natural_value(); + + // Perform some sanity checks to ensure that the transition happened exactly where we expected. 
+ let pre_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(0), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let pre_transition_block = rig + .harness + .chain + .get_block(&pre_transition_block_root) + .await + .unwrap() + .unwrap(); + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + assert_eq!( + pre_transition_block_root, + post_transition_block.parent_root(), + "the blocks form a single chain" + ); + assert!( + pre_transition_block + .message() + .body() + .execution_payload() + .unwrap() + .execution_payload + == <_>::default(), + "the block *has not* undergone the merge transition" + ); + assert!( + post_transition_block + .message() + .body() + .execution_payload() + .unwrap() + .execution_payload + != <_>::default(), + "the block *has* undergone the merge transition" + ); + + // Assert that the transition block was optimistically imported. + // + // Note: we're using the "fallback" check for optimistic status, so if the block was + // pre-finality then we'll just use the optimistic status of the finalized block. + assert!( + rig.harness + .chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&post_transition_block_root) + .unwrap(), + "the transition block should be imported optimistically" + ); + + // Get the mock execution layer to respond to `getBlockByHash` requests normally again. 
+ mock_execution_layer + .server + .all_get_block_by_hash_requests_return_natural_value(); + + return rig; +} + +#[tokio::test] +async fn optimistic_transition_block_valid_unfinalized() { + let ttd = 42; + let num_blocks = 16 as usize; + let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + < post_transition_block.slot(), + "the transition block should not be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + valid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .expect("should validate fine"); + // now that the transition block has been validated, it should have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert!( + otbs.is_empty(), + "The valid optimistic transition block should have been removed from the database", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_valid_finalized() { + let ttd = 42; + let num_blocks = 130 as usize; + let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + 
.unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + > post_transition_block.slot(), + "the transition block should be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + valid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .expect("should validate fine"); + // now that the transition block has been validated, it should have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert!( + otbs.is_empty(), + "The valid optimistic transition block should have been removed from the database", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_invalid_unfinalized() { + let block_ttd = 42; + let rig_ttd = 1337; + let num_blocks = 22 as usize; + let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + < post_transition_block.slot(), + "the transition block should not be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + 
.expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + + let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + // No shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It shouldn't be known as invalid yet + assert!(!rig + .execution_status(post_transition_block_root) + .is_invalid()); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .unwrap(); + + // Still no shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It should be marked invalid now + assert!(rig + .execution_status(post_transition_block_root) + .is_invalid()); + + // the invalid merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The invalid merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { + let block_ttd = 42; + let rig_ttd = 1337; + let num_blocks = 22 as usize; + let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + < 
post_transition_block.slot(), + "the transition block should not be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + + let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + // No shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It shouldn't be known as invalid yet + assert!(!rig + .execution_status(post_transition_block_root) + .is_invalid()); + + // Make the execution layer respond `None` to all `getBlockByHash` requests to simulate a + // syncing EE. + let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); + mock_execution_layer + .server + .all_get_block_by_hash_requests_return_none(); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .unwrap(); + + // Still no shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + + // It should still be marked as optimistic. + assert!(rig + .execution_status(post_transition_block_root) + .is_strictly_optimistic()); + + // the optimistic merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The optimistic merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); + + // Allow the EL to respond to `getBlockByHash`, as if it has finished syncing. 
+ mock_execution_layer + .server + .all_get_block_by_hash_requests_return_natural_value(); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .unwrap(); + + // Still no shutdown should've been triggered. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + // It should be marked invalid now + assert!(rig + .execution_status(post_transition_block_root) + .is_invalid()); + + // the invalid merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The invalid merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); +} + +#[tokio::test] +async fn optimistic_transition_block_invalid_finalized() { + let block_ttd = 42; + let rig_ttd = 1337; + let num_blocks = 130 as usize; + let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; + + let post_transition_block_root = rig + .harness + .chain + .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) + .unwrap() + .unwrap(); + let post_transition_block = rig + .harness + .chain + .get_block(&post_transition_block_root) + .await + .unwrap() + .unwrap(); + + assert!( + rig.cached_head() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()) + > post_transition_block.slot(), + "the transition block should be finalized" + ); + + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + + assert_eq!( + otbs.len(), + 1, + "There should be one optimistic transition block" + ); + + let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); 
+ + // No shutdown should've been triggered yet. + assert_eq!(rig.harness.shutdown_reasons(), vec![]); + + validate_optimistic_transition_blocks(&rig.harness.chain, otbs) + .await + .expect("should invalidate merge transition block and shutdown the client"); + + // The beacon chain should have triggered a shutdown. + assert_eq!( + rig.harness.shutdown_reasons(), + vec![ShutdownReason::Failure( + INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON + )] + ); + + // the invalid merge transition block should NOT have been removed from the database + let otbs = load_optimistic_transition_blocks(&rig.harness.chain) + .expect("should load optimistic transition block from db"); + assert_eq!( + otbs.len(), + 1, + "The invalid merge transition block should still be in the database", + ); + assert_eq!( + invalid_otb, otbs[0], + "The optimistic transition block stored in the database should be what we expect", + ); +} + +/// Helper for running tests where we generate a chain with an invalid head and then a +/// `fork_block` to recover it. +struct InvalidHeadSetup { + rig: InvalidPayloadRig, + fork_block: Arc>, + invalid_head: CachedHead, +} + +impl InvalidHeadSetup { + async fn new() -> InvalidHeadSetup { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + + // Import blocks until the first time the chain finalizes. 
+ while rig.cached_head().finalized_checkpoint().epoch == 0 { + rig.import_block(Payload::Syncing).await; + } + + let slots_per_epoch = E::slots_per_epoch(); + let start_slot = rig.cached_head().head_slot() + 1; + let mut opt_fork_block = None; + + assert_eq!(start_slot % slots_per_epoch, 1); + for i in 0..slots_per_epoch - 1 { + let slot = start_slot + i; + let slot_offset = slot.as_u64() % slots_per_epoch; + + rig.harness.set_current_slot(slot); + + if slot_offset == slots_per_epoch - 1 { + // Optimistic head block right before epoch boundary. + let is_valid = Payload::Syncing; + rig.import_block_parametric(is_valid, is_valid, Some(slot), |error| { + matches!( + error, + BlockError::ExecutionPayloadError( + ExecutionPayloadError::RejectedByExecutionEngine { .. } + ) + ) + }) + .await; + } else if 3 * slot_offset < 2 * slots_per_epoch { + // Valid block in previous epoch. + rig.import_block(Payload::Valid).await; + } else if slot_offset == slots_per_epoch - 2 { + // Fork block one slot prior to invalid head, not applied immediately. + let parent_state = rig + .harness + .chain + .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) + .unwrap(); + let (fork_block, _) = rig.harness.make_block(parent_state, slot).await; + opt_fork_block = Some(Arc::new(fork_block)); + } else { + // Skipped slot. + }; + } + + let invalid_head = rig.cached_head(); + assert_eq!( + invalid_head.head_slot() % slots_per_epoch, + slots_per_epoch - 1 + ); + + // Advance clock to new epoch to realize the justification of soon-to-be-invalid head block. + rig.harness.set_current_slot(invalid_head.head_slot() + 1); + + // Invalidate the head block. + rig.invalidate_manually(invalid_head.head_block_root()) + .await; + + assert!(rig + .canonical_head() + .head_execution_status() + .unwrap() + .is_invalid()); + + // Finding a new head should fail since the only possible head is not valid. 
+ rig.assert_get_head_error_contains("InvalidBestNode"); + + Self { + rig, + fork_block: opt_fork_block.unwrap(), + invalid_head, + } + } +} + +#[tokio::test] +async fn recover_from_invalid_head_by_importing_blocks() { + let InvalidHeadSetup { + rig, + fork_block, + invalid_head: _, + } = InvalidHeadSetup::new().await; + + // Import the fork block, it should become the head. + rig.harness + .chain + .process_block(fork_block.clone(), CountUnrealized::True) + .await + .unwrap(); + rig.recompute_head().await; + let new_head = rig.cached_head(); + assert_eq!( + new_head.head_block_root(), + fork_block.canonical_root(), + "the fork block should become the head" + ); + + let manual_get_head = rig + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .get_head(rig.harness.chain.slot().unwrap(), &rig.harness.chain.spec) + .unwrap(); + assert_eq!(manual_get_head, new_head.head_block_root()); +} + +#[tokio::test] +async fn recover_from_invalid_head_after_persist_and_reboot() { + let InvalidHeadSetup { + rig, + fork_block: _, + invalid_head, + } = InvalidHeadSetup::new().await; + + let slot_clock = rig.harness.chain.slot_clock.clone(); + + // Forcefully persist the head and fork choice. + rig.harness.chain.persist_head_and_fork_choice().unwrap(); + + let resumed = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .resumed_ephemeral_store(rig.harness.chain.store.clone()) + .mock_execution_layer() + .testing_slot_clock(slot_clock) + .build(); + + // Forget the original rig so we don't accidentally use it again. 
+ drop(rig); + + let resumed_head = resumed.chain.canonical_head.cached_head(); + assert_eq!( + resumed_head.head_block_root(), + invalid_head.head_block_root(), + "the resumed harness should have the invalid block as the head" + ); + assert!( + resumed + .chain + .canonical_head + .fork_choice_read_lock() + .get_block_execution_status(&resumed_head.head_block_root()) + .unwrap() + .is_strictly_optimistic(), + "the invalid block should have become optimistic" + ); +} + +#[tokio::test] +async fn weights_after_resetting_optimistic_status() { + let mut rig = InvalidPayloadRig::new().enable_attestations(); + rig.move_to_terminal_block(); + rig.import_block(Payload::Valid).await; // Import a valid transition block. + + let mut roots = vec![]; + for _ in 0..4 { + roots.push(rig.import_block(Payload::Syncing).await); + } + + rig.recompute_head().await; + let head = rig.cached_head(); + + let original_weights = rig + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .proto_array() + .iter_nodes(&head.head_block_root()) + .map(|node| (node.root, node.weight)) + .collect::>(); + + rig.invalidate_manually(roots[1]).await; + + rig.harness + .chain + .canonical_head + .fork_choice_write_lock() + .proto_array_mut() + .set_all_blocks_to_optimistic::(&rig.harness.chain.spec) + .unwrap(); + + let new_weights = rig + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .proto_array() + .iter_nodes(&head.head_block_root()) + .map(|node| (node.root, node.weight)) + .collect::>(); + + assert_eq!(original_weights, new_weights); + + // Advance the current slot and run fork choice to remove proposer boost. 
+ rig.harness + .set_current_slot(rig.harness.chain.slot().unwrap() + 1); + rig.recompute_head().await; + + assert_eq!( + rig.harness + .chain + .canonical_head + .fork_choice_read_lock() + .get_block_weight(&head.head_block_root()) + .unwrap(), + head.snapshot.beacon_state.validators()[0].effective_balance, + "proposer boost should be removed from the head block and the vote of a single validator applied" + ); + + // Import a length of chain to ensure the chain can be built atop. + for _ in 0..E::slots_per_epoch() * 4 { + rig.import_block(Payload::Valid).await; + } +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ab179cfd6e..da571e43c3 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -10,6 +10,7 @@ use beacon_chain::{ BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler, WhenSlotSkipped, }; +use fork_choice::CountUnrealized; use lazy_static::lazy_static; use logging::test_logger; use maplit::hashset; @@ -71,18 +72,20 @@ fn get_harness( harness } -#[test] -fn full_participation_no_skips() { +#[tokio::test] +async fn full_participation_no_skips() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, num_blocks_produced); check_split_slot(&harness, store); @@ -90,8 +93,8 @@ fn full_participation_no_skips() { check_iterators(&harness); } -#[test] -fn randomised_skips() { +#[tokio::test] +async fn randomised_skips() { let num_slots = E::slots_per_epoch() * 5; let mut 
num_blocks_produced = 0; let db_path = tempdir().unwrap(); @@ -103,14 +106,16 @@ fn randomised_skips() { for slot in 1..=num_slots { if rng.gen_bool(0.8) { - harness.extend_chain( - 1, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: Slot::new(head_slot), - first_slot: Slot::new(slot), - }, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: Slot::new(head_slot), + first_slot: Slot::new(slot), + }, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); num_blocks_produced += 1; head_slot = slot; @@ -119,7 +124,7 @@ fn randomised_skips() { } } - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), @@ -132,8 +137,8 @@ fn randomised_skips() { check_iterators(&harness); } -#[test] -fn long_skip() { +#[tokio::test] +async fn long_skip() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); @@ -147,11 +152,13 @@ fn long_skip() { // Having this set lower ensures that we start justifying and finalizing quickly after a skip. let final_blocks = 2 * E::slots_per_epoch() + E::slots_per_epoch() / 2; - harness.extend_chain( - initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, initial_blocks); @@ -161,14 +168,16 @@ fn long_skip() { } // 3. 
Produce more blocks, establish a new finalized epoch - harness.extend_chain( - final_blocks as usize, - BlockStrategy::ForkCanonicalChainAt { - previous_slot: Slot::new(initial_blocks), - first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), - }, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + final_blocks as usize, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: Slot::new(initial_blocks), + first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), + }, + AttestationStrategy::AllValidators, + ) + .await; check_finalization(&harness, initial_blocks + skip_slots + final_blocks); check_split_slot(&harness, store); @@ -182,8 +191,8 @@ fn long_skip() { /// 1. The chunked vector scheme doesn't attempt to store an incorrect genesis value /// 2. We correctly load the genesis value for all required slots /// NOTE: this test takes about a minute to run -#[test] -fn randao_genesis_storage() { +#[tokio::test] +async fn randao_genesis_storage() { let validator_count = 8; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -194,24 +203,24 @@ fn randao_genesis_storage() { // Check we have a non-trivial genesis value let genesis_value = *harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .get_randao_mix(Epoch::new(0)) .expect("randao mix ok"); assert!(!genesis_value.is_zero()); - harness.extend_chain( - num_slots as usize - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_slots as usize - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // Check that genesis value is still present assert!(harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .randao_mixes() .iter() @@ -220,15 +229,16 @@ fn randao_genesis_storage() { // Then upon adding one more block, it isn't harness.advance_slot(); - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - 
AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!(harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .randao_mixes() .iter() @@ -242,8 +252,8 @@ fn randao_genesis_storage() { } // Check that closing and reopening a freezer DB restores the split slot to its correct value. -#[test] -fn split_slot_restore() { +#[tokio::test] +async fn split_slot_restore() { let db_path = tempdir().unwrap(); let split_slot = { @@ -252,11 +262,13 @@ fn split_slot_restore() { let num_blocks = 4 * E::slots_per_epoch(); - harness.extend_chain( - num_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; store.get_split_slot() }; @@ -271,8 +283,8 @@ fn split_slot_restore() { // Check attestation processing and `load_epoch_boundary_state` in the presence of a split DB. // This is a bit of a monster test in that it tests lots of different things, but until they're // tested elsewhere, this is as good a place as any. 
-#[test] -fn epoch_boundary_state_attestation_processing() { +#[tokio::test] +async fn epoch_boundary_state_attestation_processing() { let num_blocks_produced = E::slots_per_epoch() * 5; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -284,13 +296,15 @@ fn epoch_boundary_state_attestation_processing() { let mut late_attestations = vec![]; for _ in 0..num_blocks_produced { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(timely_validators.clone()), - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(timely_validators.clone()), + ) + .await; - let head = harness.chain.head().expect("head ok"); + let head = harness.chain.head_snapshot(); late_attestations.extend(harness.get_unaggregated_attestations( &AttestationStrategy::SomeValidators(late_validators.clone()), &head.beacon_state, @@ -311,12 +325,7 @@ fn epoch_boundary_state_attestation_processing() { for (attestation, subnet_id) in late_attestations.into_iter().flatten() { // If the attestation is pre-finalization it should be rejected. - let finalized_epoch = harness - .chain - .head_info() - .expect("should get head") - .finalized_checkpoint - .epoch; + let finalized_epoch = harness.finalized_checkpoint().epoch; let res = harness .chain @@ -347,8 +356,8 @@ fn epoch_boundary_state_attestation_processing() { } // Test that the `end_slot` for forwards block and state root iterators works correctly. 
-#[test] -fn forwards_iter_block_and_state_roots_until() { +#[tokio::test] +async fn forwards_iter_block_and_state_roots_until() { let num_blocks_produced = E::slots_per_epoch() * 17; let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -356,13 +365,14 @@ fn forwards_iter_block_and_state_roots_until() { let all_validators = &harness.get_all_validators(); let (mut head_state, mut head_state_root) = harness.get_current_state_and_root(); - let head_block_root = harness.chain.head_info().unwrap().block_root; + let head_block_root = harness.head_block_root(); let mut block_roots = vec![head_block_root]; let mut state_roots = vec![head_state_root]; for slot in (1..=num_blocks_produced).map(Slot::from) { let (block_root, mut state) = harness .add_attested_block_at_slot(slot, head_state, head_state_root, all_validators) + .await .unwrap(); head_state_root = state.update_tree_hash_cache().unwrap(); head_state = state; @@ -412,8 +422,8 @@ fn forwards_iter_block_and_state_roots_until() { test_range(Slot::new(0), head_state.slot()); } -#[test] -fn block_replayer_hooks() { +#[tokio::test] +async fn block_replayer_hooks() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); @@ -428,12 +438,9 @@ fn block_replayer_hooks() { let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - let (_, _, end_block_root, mut end_state) = harness.add_attested_blocks_at_slots( - state.clone(), - state_root, - &block_slots, - &all_validators, - ); + let (_, _, end_block_root, mut end_state) = harness + .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators) + .await; let blocks = store .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) @@ -491,8 +498,8 @@ fn block_replayer_hooks() { assert_eq!(end_state, replay_state); } -#[test] -fn delete_blocks_and_states() { +#[tokio::test] +async fn 
delete_blocks_and_states() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let validators_keypairs = @@ -510,7 +517,9 @@ fn delete_blocks_and_states() { let initial_slots: Vec = (1..=unforked_blocks).map(Into::into).collect(); let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators); + harness + .add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators) + .await; // Create a fork post-finalization. let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2; @@ -530,20 +539,21 @@ fn delete_blocks_and_states() { let fork1_state = harness.get_current_state(); let fork2_state = fork1_state.clone(); - let results = harness.add_blocks_on_multiple_chains(vec![ - (fork1_state, fork1_slots, honest_validators), - (fork2_state, fork2_slots, faulty_validators), - ]); + let results = harness + .add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, honest_validators), + (fork2_state, fork2_slots, faulty_validators), + ]) + .await; let honest_head = results[0].2; let faulty_head = results[1].2; assert_ne!(honest_head, faulty_head, "forks should be distinct"); - let head_info = harness.chain.head_info().expect("should get head"); - assert_eq!(head_info.slot, unforked_blocks + fork_blocks); + assert_eq!(harness.head_slot(), unforked_blocks + fork_blocks); assert_eq!( - head_info.block_root, + harness.head_block_root(), honest_head.into(), "the honest chain should be the canonical chain", ); @@ -614,7 +624,7 @@ fn delete_blocks_and_states() { // Check that we never produce invalid blocks when there is deep forking that changes the shuffling. 
// See https://github.com/sigp/lighthouse/issues/845 -fn multi_epoch_fork_valid_blocks_test( +async fn multi_epoch_fork_valid_blocks_test( initial_blocks: usize, num_fork1_blocks_: usize, num_fork2_blocks_: usize, @@ -639,7 +649,9 @@ fn multi_epoch_fork_valid_blocks_test( let initial_slots: Vec = (1..=initial_blocks).map(Into::into).collect(); let (state, state_root) = harness.get_current_state_and_root(); let all_validators = harness.get_all_validators(); - harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators); + harness + .add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators) + .await; } assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT); @@ -657,10 +669,12 @@ fn multi_epoch_fork_valid_blocks_test( .map(Into::into) .collect(); - let results = harness.add_blocks_on_multiple_chains(vec![ - (fork1_state, fork1_slots, fork1_validators), - (fork2_state, fork2_slots, fork2_validators), - ]); + let results = harness + .add_blocks_on_multiple_chains(vec![ + (fork1_state, fork1_slots, fork1_validators), + (fork2_state, fork2_slots, fork2_validators), + ]) + .await; let head1 = results[0].2; let head2 = results[1].2; @@ -669,43 +683,47 @@ fn multi_epoch_fork_valid_blocks_test( } // This is the minimal test of block production with different shufflings. 
-#[test] -fn block_production_different_shuffling_early() { +#[tokio::test] +async fn block_production_different_shuffling_early() { let slots_per_epoch = E::slots_per_epoch() as usize; multi_epoch_fork_valid_blocks_test( slots_per_epoch - 2, slots_per_epoch + 3, slots_per_epoch + 3, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; } -#[test] -fn block_production_different_shuffling_long() { +#[tokio::test] +async fn block_production_different_shuffling_long() { let slots_per_epoch = E::slots_per_epoch() as usize; multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch - 2, 3 * slots_per_epoch, 3 * slots_per_epoch, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; } // Check that the op pool safely includes multiple attestations per block when necessary. // This checks the correctness of the shuffling compatibility memoization. -#[test] -fn multiple_attestations_per_block() { +#[tokio::test] +async fn multiple_attestations_per_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store, HIGH_VALIDATOR_COUNT); - harness.extend_chain( - E::slots_per_epoch() as usize * 3, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + E::slots_per_epoch() as usize * 3, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); let committees_per_slot = head .beacon_state .get_committee_count_at_slot(head.beacon_state.slot()) @@ -717,8 +735,8 @@ fn multiple_attestations_per_block() { assert_eq!( snapshot .beacon_block - .deconstruct() - .0 + .as_ref() + .message() .body() .attestations() .len() as u64, @@ -727,18 +745,20 @@ fn multiple_attestations_per_block() { } } -#[test] -fn shuffling_compatible_linear_chain() { +#[tokio::test] +async fn shuffling_compatible_linear_chain() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = 
get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip the block at the end of the first epoch. - let head_block_root = harness.extend_chain( - 4 * E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let head_block_root = harness + .extend_chain( + 4 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_shuffling_compatible( &harness, @@ -751,25 +771,29 @@ fn shuffling_compatible_linear_chain() { ); } -#[test] -fn shuffling_compatible_missing_pivot_block() { +#[tokio::test] +async fn shuffling_compatible_missing_pivot_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); // Skip the block at the end of the first epoch. - harness.extend_chain( - E::slots_per_epoch() as usize - 2, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + E::slots_per_epoch() as usize - 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); harness.advance_slot(); - let head_block_root = harness.extend_chain( - 2 * E::slots_per_epoch() as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + let head_block_root = harness + .extend_chain( + 2 * E::slots_per_epoch() as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; check_shuffling_compatible( &harness, @@ -782,15 +806,16 @@ fn shuffling_compatible_missing_pivot_block() { ); } -#[test] -fn shuffling_compatible_simple_fork() { +#[tokio::test] +async fn shuffling_compatible_simple_fork() { let slots_per_epoch = E::slots_per_epoch() as usize; let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch, 3 * slots_per_epoch, 3 * slots_per_epoch, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; let head1_state = 
get_state_for_block(&harness, head1); let head2_state = get_state_for_block(&harness, head2); @@ -803,15 +828,16 @@ fn shuffling_compatible_simple_fork() { drop(db_path); } -#[test] -fn shuffling_compatible_short_fork() { +#[tokio::test] +async fn shuffling_compatible_short_fork() { let slots_per_epoch = E::slots_per_epoch() as usize; let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test( 2 * slots_per_epoch - 2, slots_per_epoch + 2, slots_per_epoch + 2, LOW_VALIDATOR_COUNT / 2, - ); + ) + .await; let head1_state = get_state_for_block(&harness, head1); let head2_state = get_state_for_block(&harness, head2); @@ -916,8 +942,8 @@ fn check_shuffling_compatible( } // Ensure blocks from abandoned forks are pruned from the Hot DB -#[test] -fn prunes_abandoned_fork_between_two_finalized_checkpoints() { +#[tokio::test] +async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -940,7 +966,8 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { state_root, &canonical_chain_slots, &honest_validators, - ); + ) + .await; state = new_state; let canonical_chain_slot: u64 = rig.get_current_slot().into(); @@ -948,12 +975,14 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { .map(Slot::new) .collect(); let (current_state, current_state_root) = rig.get_current_state_and_root(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - current_state, - current_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + current_state, + current_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Precondition: Ensure all stray_blocks blocks are still known for &block_hash in stray_blocks.values() { @@ -983,12 
+1012,9 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (canonical_chain_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - state, - state_root, - &finalization_slots, - &honest_validators, - ); + let (canonical_chain_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &finalization_slots, &honest_validators) + .await; // Postcondition: New blocks got finalized assert_eq!( @@ -1026,8 +1052,8 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() { assert!(!rig.chain.knows_head(&stray_head)); } -#[test] -fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { +#[tokio::test] +async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1046,12 +1072,14 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { // Fill up 0th epoch let canonical_chain_slots_zeroth_epoch: Vec = (1..rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (_, _, _, mut state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &canonical_chain_slots_zeroth_epoch, - &honest_validators, - ); + let (_, _, _, mut state) = rig + .add_attested_blocks_at_slots( + state, + state_root, + &canonical_chain_slots_zeroth_epoch, + &honest_validators, + ) + .await; // Fill up 1st epoch let canonical_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) @@ -1065,7 +1093,8 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { state_root, &canonical_chain_slots_first_epoch, &honest_validators, - ); + ) + .await; let canonical_chain_slot: u64 = rig.get_current_slot().into(); let stray_chain_slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 2 @@ -1073,12 
+1102,14 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - state.clone(), - state_root, - &stray_chain_slots_first_epoch, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + state.clone(), + state_root, + &stray_chain_slots_first_epoch, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1112,12 +1143,9 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (canonical_chain_blocks, _, _, _) = rig.add_attested_blocks_at_slots( - state, - state_root, - &finalization_slots, - &honest_validators, - ); + let (canonical_chain_blocks, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &finalization_slots, &honest_validators) + .await; // Postconditions assert_eq!( @@ -1156,8 +1184,8 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { assert!(get_blocks(&chain_dump).contains(&shared_head)); } -#[test] -fn pruning_does_not_touch_blocks_prior_to_finalization() { +#[tokio::test] +async fn pruning_does_not_touch_blocks_prior_to_finalization() { const HONEST_VALIDATOR_COUNT: usize = 16; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1175,12 +1203,9 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { // Fill up 0th epoch with canonical chain blocks let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (canonical_chain_blocks, _, _, new_state) = rig.add_attested_blocks_at_slots( - state, - state_root, - &zeroth_epoch_slots, - &honest_validators, - ); + let 
(canonical_chain_blocks, _, _, new_state) = rig + .add_attested_blocks_at_slots(state, state_root, &zeroth_epoch_slots, &honest_validators) + .await; state = new_state; let canonical_chain_slot: u64 = rig.get_current_slot().into(); @@ -1189,12 +1214,14 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots( - state.clone(), - state_root, - &first_epoch_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, stray_head, _) = rig + .add_attested_blocks_at_slots( + state.clone(), + state_root, + &first_epoch_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1222,8 +1249,9 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { .map(Slot::new) .collect(); let state_root = state.update_tree_hash_cache().unwrap(); - let (_, _, _, _) = - rig.add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators); + let (_, _, _, _) = rig + .add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators) + .await; // Postconditions assert_eq!( @@ -1251,8 +1279,8 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() { assert!(rig.chain.knows_head(&stray_head)); } -#[test] -fn prunes_fork_growing_past_youngest_finalized_checkpoint() { +#[tokio::test] +async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1269,12 +1297,9 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // Fill up 0th epoch with canonical chain blocks let zeroth_epoch_slots: Vec = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect(); - let (canonical_blocks_zeroth_epoch, _, _, mut state) = 
rig.add_attested_blocks_at_slots( - state, - state_root, - &zeroth_epoch_slots, - &honest_validators, - ); + let (canonical_blocks_zeroth_epoch, _, _, mut state) = rig + .add_attested_blocks_at_slots(state, state_root, &zeroth_epoch_slots, &honest_validators) + .await; // Fill up 1st epoch. Contains a fork. let slots_first_epoch: Vec = (rig.epoch_start_slot(1) + 1..rig.epoch_start_slot(2)) @@ -1287,9 +1312,11 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { state_root, &slots_first_epoch, &adversarial_validators, - ); - let (canonical_blocks_first_epoch, _, _, mut canonical_state) = - rig.add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators); + ) + .await; + let (canonical_blocks_first_epoch, _, _, mut canonical_state) = rig + .add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators) + .await; // Fill up 2nd epoch. Extends both the canonical chain and the fork. let stray_slots_second_epoch: Vec = (rig.epoch_start_slot(2) @@ -1303,7 +1330,8 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { stray_state_root, &stray_slots_second_epoch, &adversarial_validators, - ); + ) + .await; // Precondition: Ensure all stray_blocks blocks are still known let stray_blocks: HashMap = stray_blocks_first_epoch @@ -1343,12 +1371,14 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1394,8 +1424,8 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() { } // 
This is to check if state outside of normal block processing are pruned correctly. -#[test] -fn prunes_skipped_slots_states() { +#[tokio::test] +async fn prunes_skipped_slots_states() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1418,7 +1448,8 @@ fn prunes_skipped_slots_states() { state_root, &canonical_slots_zeroth_epoch, &honest_validators, - ); + ) + .await; let skipped_slot: Slot = (rig.epoch_start_slot(1) + 1).into(); @@ -1426,12 +1457,14 @@ fn prunes_skipped_slots_states() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( - canonical_state.clone(), - canonical_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, _, stray_state) = rig + .add_attested_blocks_at_slots( + canonical_state.clone(), + canonical_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1469,12 +1502,14 @@ fn prunes_skipped_slots_states() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1518,8 +1553,8 @@ fn prunes_skipped_slots_states() { } // This is to check if state outside of normal block processing are pruned correctly. 
-#[test] -fn finalizes_non_epoch_start_slot() { +#[tokio::test] +async fn finalizes_non_epoch_start_slot() { const HONEST_VALIDATOR_COUNT: usize = 16 + 0; const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0; const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT; @@ -1542,7 +1577,8 @@ fn finalizes_non_epoch_start_slot() { state_root, &canonical_slots_zeroth_epoch, &honest_validators, - ); + ) + .await; let skipped_slot: Slot = rig.epoch_start_slot(1).into(); @@ -1550,12 +1586,14 @@ fn finalizes_non_epoch_start_slot() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots( - canonical_state.clone(), - canonical_state_root, - &stray_slots, - &adversarial_validators, - ); + let (stray_blocks, stray_states, _, stray_state) = rig + .add_attested_blocks_at_slots( + canonical_state.clone(), + canonical_state_root, + &stray_slots, + &adversarial_validators, + ) + .await; // Preconditions for &block_hash in stray_blocks.values() { @@ -1593,12 +1631,14 @@ fn finalizes_non_epoch_start_slot() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ); + let (canonical_blocks_post_finalization, _, _, _) = rig + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + ) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1702,14 +1742,14 @@ fn check_no_blocks_exist<'a>( } } -#[test] -fn prune_single_block_fork() { +#[tokio::test] +async fn prune_single_block_fork() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(3 * slots_per_epoch, 1, slots_per_epoch, 0, 1); + pruning_test(3 * 
slots_per_epoch, 1, slots_per_epoch, 0, 1).await; } -#[test] -fn prune_single_block_long_skip() { +#[tokio::test] +async fn prune_single_block_long_skip() { let slots_per_epoch = E::slots_per_epoch(); pruning_test( 2 * slots_per_epoch, @@ -1717,11 +1757,12 @@ fn prune_single_block_long_skip() { 2 * slots_per_epoch, 2 * slots_per_epoch as u64, 1, - ); + ) + .await; } -#[test] -fn prune_shared_skip_states_mid_epoch() { +#[tokio::test] +async fn prune_shared_skip_states_mid_epoch() { let slots_per_epoch = E::slots_per_epoch(); pruning_test( slots_per_epoch + slots_per_epoch / 2, @@ -1729,39 +1770,43 @@ fn prune_shared_skip_states_mid_epoch() { slots_per_epoch, 2, slots_per_epoch - 1, - ); + ) + .await; } -#[test] -fn prune_shared_skip_states_epoch_boundaries() { +#[tokio::test] +async fn prune_shared_skip_states_epoch_boundaries() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch); - pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch); + pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch).await; + pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch).await; pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, slots_per_epoch as u64 / 2, slots_per_epoch, slots_per_epoch as u64 / 2 + 1, slots_per_epoch, - ); + ) + .await; pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, slots_per_epoch as u64 / 2, slots_per_epoch, slots_per_epoch as u64 / 2 + 1, slots_per_epoch, - ); + ) + .await; pruning_test( 2 * slots_per_epoch - 1, slots_per_epoch as u64, 1, 0, 2 * slots_per_epoch, - ); + ) + .await; } /// Generic harness for pruning tests. -fn pruning_test( +async fn pruning_test( // Number of blocks to start the chain with before forking. num_initial_blocks: u64, // Number of skip slots on the main chain after the initial blocks. 
@@ -1793,30 +1838,34 @@ fn pruning_test( let start_slot = Slot::new(1); let divergence_slot = start_slot + num_initial_blocks; let (state, state_root) = harness.get_current_state_and_root(); - let (_, _, _, divergence_state) = harness.add_attested_blocks_at_slots( - state, - state_root, - &slots(start_slot, num_initial_blocks)[..], - &honest_validators, - ); + let (_, _, _, divergence_state) = harness + .add_attested_blocks_at_slots( + state, + state_root, + &slots(start_slot, num_initial_blocks)[..], + &honest_validators, + ) + .await; - let mut chains = harness.add_blocks_on_multiple_chains(vec![ - // Canonical chain - ( - divergence_state.clone(), - slots( - divergence_slot + num_canonical_skips, - num_canonical_middle_blocks, + let mut chains = harness + .add_blocks_on_multiple_chains(vec![ + // Canonical chain + ( + divergence_state.clone(), + slots( + divergence_slot + num_canonical_skips, + num_canonical_middle_blocks, + ), + honest_validators.clone(), ), - honest_validators.clone(), - ), - // Fork chain - ( - divergence_state.clone(), - slots(divergence_slot + num_fork_skips, num_fork_blocks), - faulty_validators, - ), - ]); + // Fork chain + ( + divergence_state.clone(), + slots(divergence_slot + num_fork_skips, num_fork_blocks), + faulty_validators, + ), + ]) + .await; let (_, _, _, mut canonical_state) = chains.remove(0); let (stray_blocks, stray_states, _, stray_head_state) = chains.remove(0); @@ -1842,20 +1891,19 @@ fn pruning_test( let num_finalization_blocks = 4 * E::slots_per_epoch(); let canonical_slot = divergence_slot + num_canonical_skips + num_canonical_middle_blocks; let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - harness.add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &slots(canonical_slot, num_finalization_blocks), - &honest_validators, - ); + harness + .add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &slots(canonical_slot, num_finalization_blocks), + 
&honest_validators, + ) + .await; // Check that finalization has advanced past the divergence slot. assert!( harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint + .finalized_checkpoint() .epoch .start_slot(E::slots_per_epoch()) > divergence_slot @@ -1884,44 +1932,49 @@ fn delete_states_from_failed_block() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let slots_per_epoch = E::slots_per_epoch(); - let genesis_state = harness.get_current_state(); - let block_slot = Slot::new(2 * slots_per_epoch); - let (signed_block, state) = harness.make_block(genesis_state, block_slot); + // Use a `block_on_dangerous` rather than an async test to stop spawned processes from holding + // a reference to the store. + harness.chain.task_executor.clone().block_on_dangerous( + async move { + let slots_per_epoch = E::slots_per_epoch(); - let (mut block, _) = signed_block.deconstruct(); + let genesis_state = harness.get_current_state(); + let block_slot = Slot::new(2 * slots_per_epoch); + let (signed_block, state) = harness.make_block(genesis_state, block_slot).await; - // Mutate the block to make it invalid, and re-sign it. - *block.state_root_mut() = Hash256::repeat_byte(0xff); - let proposer_index = block.proposer_index() as usize; - let block = block.sign( - &harness.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), - &harness.spec, + let (mut block, _) = signed_block.deconstruct(); + + // Mutate the block to make it invalid, and re-sign it. + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = block.sign( + &harness.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + ); + + // The block should be rejected, but should store a bunch of temporary states. 
+ harness.set_current_slot(block_slot); + harness.process_block_result(block).await.unwrap_err(); + + assert_eq!( + store.iter_temporary_state_roots().count(), + block_slot.as_usize() - 1 + ); + }, + "test", ); - // The block should be rejected, but should store a bunch of temporary states. - harness.set_current_slot(block_slot); - harness.process_block_result(block).unwrap_err(); - - assert_eq!( - store.iter_temporary_state_roots().count(), - block_slot.as_usize() - 1 - ); - - drop(harness); - drop(store); - // On startup, the store should garbage collect all the temporary states. let store = get_store(&db_path); assert_eq!(store.iter_temporary_state_roots().count(), 0); } */ -#[test] -fn weak_subjectivity_sync() { +#[tokio::test] +async fn weak_subjectivity_sync() { // Build an initial chain on one harness, representing a synced node with full history. let num_initial_blocks = E::slots_per_epoch() * 11; let num_final_blocks = E::slots_per_epoch() * 2; @@ -1930,17 +1983,19 @@ fn weak_subjectivity_sync() { let full_store = get_store(&temp1); let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); - harness.extend_chain( - num_initial_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let genesis_state = full_store .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0))) .unwrap() .unwrap(); - let wss_checkpoint = harness.chain.head_info().unwrap().finalized_checkpoint; + let wss_checkpoint = harness.finalized_checkpoint(); let wss_block = harness .chain .store @@ -1955,11 +2010,13 @@ fn weak_subjectivity_sync() { // Add more blocks that advance finalization further. 
harness.advance_slot(); - harness.extend_chain( - num_final_blocks as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); let log = test_logger(); @@ -1973,6 +2030,7 @@ fn weak_subjectivity_sync() { BeaconChainBuilder::new(MinimalEthSpec) .store(store.clone()) .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state) .unwrap() .logger(log.clone()) @@ -2003,12 +2061,15 @@ fn weak_subjectivity_sync() { let full_block = harness .chain .store - .make_full_block(&snapshot.beacon_block_root, block.clone()) + .make_full_block(&snapshot.beacon_block_root, block.as_ref().clone()) .unwrap(); beacon_chain.slot_clock.set_slot(block.slot().as_u64()); - beacon_chain.process_block(full_block).unwrap(); - beacon_chain.fork_choice().unwrap(); + beacon_chain + .process_block(Arc::new(full_block), CountUnrealized::True) + .await + .unwrap(); + beacon_chain.recompute_head_at_current_slot().await; // Check that the new block's state can be loaded correctly. 
let state_root = block.state_root(); @@ -2102,8 +2163,8 @@ fn weak_subjectivity_sync() { assert_eq!(store.get_anchor_slot(), None); } -#[test] -fn finalizes_after_resuming_from_db() { +#[tokio::test] +async fn finalizes_after_resuming_from_db() { let validator_count = 16; let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8; let first_half = num_blocks_produced / 2; @@ -2120,17 +2181,18 @@ fn finalizes_after_resuming_from_db() { harness.advance_slot(); - harness.extend_chain( - first_half as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + first_half as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!( harness .chain - .head() - .expect("should read head") + .head_snapshot() .beacon_state .finalized_checkpoint() .epoch @@ -2172,17 +2234,15 @@ fn finalizes_after_resuming_from_db() { .slot_clock .set_slot(latest_slot.as_u64() + 1); - resumed_harness.extend_chain( - (num_blocks_produced - first_half) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + resumed_harness + .extend_chain( + (num_blocks_produced - first_half) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &resumed_harness - .chain - .head() - .expect("should read head") - .beacon_state; + let state = &resumed_harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), num_blocks_produced, @@ -2205,8 +2265,8 @@ fn finalizes_after_resuming_from_db() { ); } -#[test] -fn revert_minority_fork_on_resume() { +#[tokio::test] +async fn revert_minority_fork_on_resume() { let validator_count = 16; let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); @@ -2262,17 +2322,17 @@ fn revert_minority_fork_on_resume() { harness1.process_attestations(attestations.clone()); harness2.process_attestations(attestations); - let (block, new_state) = harness1.make_block(state, slot); + let (block, 
new_state) = harness1.make_block(state, slot).await; - harness1.process_block(slot, block.clone()).unwrap(); - harness2.process_block(slot, block.clone()).unwrap(); + harness1.process_block(slot, block.clone()).await.unwrap(); + harness2.process_block(slot, block.clone()).await.unwrap(); state = new_state; block_root = block.canonical_root(); } - assert_eq!(harness1.chain.head_info().unwrap().slot, fork_slot - 1); - assert_eq!(harness2.chain.head_info().unwrap().slot, fork_slot - 1); + assert_eq!(harness1.head_slot(), fork_slot - 1); + assert_eq!(harness2.head_slot(), fork_slot - 1); // Fork the two chains. let mut state1 = state.clone(); @@ -2297,13 +2357,13 @@ fn revert_minority_fork_on_resume() { harness2.process_attestations(attestations); // Minority chain block (no attesters). - let (block1, new_state1) = harness1.make_block(state1, slot); - harness1.process_block(slot, block1).unwrap(); + let (block1, new_state1) = harness1.make_block(state1, slot).await; + harness1.process_block(slot, block1).await.unwrap(); state1 = new_state1; // Majority chain block (all attesters). - let (block2, new_state2) = harness2.make_block(state2, slot); - harness2.process_block(slot, block2.clone()).unwrap(); + let (block2, new_state2) = harness2.make_block(state2, slot).await; + harness2.process_block(slot, block2.clone()).await.unwrap(); state2 = new_state2; block_root = block2.canonical_root(); @@ -2312,8 +2372,8 @@ fn revert_minority_fork_on_resume() { } let end_slot = fork_slot + post_fork_blocks - 1; - assert_eq!(harness1.chain.head_info().unwrap().slot, end_slot); - assert_eq!(harness2.chain.head_info().unwrap().slot, end_slot); + assert_eq!(harness1.head_slot(), end_slot); + assert_eq!(harness2.head_slot(), end_slot); // Resume from disk with the hard-fork activated: this should revert the post-fork blocks. 
// We have to do some hackery with the `slot_clock` so that the correct slot is set when @@ -2341,24 +2401,27 @@ fn revert_minority_fork_on_resume() { .build(); // Head should now be just before the fork. - resumed_harness.chain.fork_choice().unwrap(); - let head = resumed_harness.chain.head_info().unwrap(); - assert_eq!(head.slot, fork_slot - 1); + resumed_harness.chain.recompute_head_at_current_slot().await; + assert_eq!(resumed_harness.head_slot(), fork_slot - 1); // Head track should know the canonical head and the rogue head. assert_eq!(resumed_harness.chain.heads().len(), 2); - assert!(resumed_harness.chain.knows_head(&head.block_root.into())); + assert!(resumed_harness + .chain + .knows_head(&resumed_harness.head_block_root().into())); // Apply blocks from the majority chain and trigger finalization. let initial_split_slot = resumed_harness.chain.store.get_split_slot(); for block in &majority_blocks { - resumed_harness.process_block_result(block.clone()).unwrap(); + resumed_harness + .process_block_result(block.clone()) + .await + .unwrap(); // The canonical head should be the block from the majority chain. 
- resumed_harness.chain.fork_choice().unwrap(); - let head_info = resumed_harness.chain.head_info().unwrap(); - assert_eq!(head_info.slot, block.slot()); - assert_eq!(head_info.block_root, block.canonical_root()); + resumed_harness.chain.recompute_head_at_current_slot().await; + assert_eq!(resumed_harness.head_slot(), block.slot()); + assert_eq!(resumed_harness.head_block_root(), block.canonical_root()); } let advanced_split_slot = resumed_harness.chain.store.get_split_slot(); @@ -2377,10 +2440,22 @@ fn revert_minority_fork_on_resume() { fn assert_chains_pretty_much_the_same(a: &BeaconChain, b: &BeaconChain) { assert_eq!(a.spec, b.spec, "spec should be equal"); assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal"); + let a_head = a.head_snapshot(); + let b_head = b.head_snapshot(); assert_eq!( - a.head().unwrap(), - b.head().unwrap(), - "head() should be equal" + a_head.beacon_block_root, b_head.beacon_block_root, + "head block roots should be equal" + ); + assert_eq!( + a_head.beacon_block, b_head.beacon_block, + "head blocks should be equal" + ); + // Clone with committee caches only to prevent other caches from messing with the equality + // check. + assert_eq!( + a_head.beacon_state.clone_with_only_committee_caches(), + b_head.beacon_state.clone_with_only_committee_caches(), + "head states should be equal" ); assert_eq!(a.heads(), b.heads(), "heads() should be equal"); assert_eq!( @@ -2391,15 +2466,21 @@ fn assert_chains_pretty_much_the_same(a: &BeaconChain, b let slot = a.slot().unwrap(); let spec = T::EthSpec::default_spec(); assert!( - a.fork_choice.write().get_head(slot, &spec).unwrap() - == b.fork_choice.write().get_head(slot, &spec).unwrap(), + a.canonical_head + .fork_choice_write_lock() + .get_head(slot, &spec) + .unwrap() + == b.canonical_head + .fork_choice_write_lock() + .get_head(slot, &spec) + .unwrap(), "fork_choice heads should be equal" ); } /// Check that the head state's slot matches `expected_slot`. 
fn check_slot(harness: &TestHarness, expected_slot: u64) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; assert_eq!( state.slot(), @@ -2410,7 +2491,7 @@ fn check_slot(harness: &TestHarness, expected_slot: u64) { /// Check that the chain has finalized under best-case assumptions, and check the head slot. fn check_finalization(harness: &TestHarness, expected_slot: u64) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state = &harness.chain.head_snapshot().beacon_state; check_slot(harness, expected_slot); @@ -2432,8 +2513,7 @@ fn check_split_slot(harness: &TestHarness, store: Arc, L assert_eq!( harness .chain - .head() - .expect("should get head") + .head_snapshot() .beacon_state .finalized_checkpoint() .epoch @@ -2520,10 +2600,7 @@ fn check_iterators(harness: &TestHarness) { max_slot = Some(slot); } // Assert that we reached the head. - assert_eq!( - max_slot, - Some(harness.chain.head_info().expect("should get head").slot) - ); + assert_eq!(max_slot, Some(harness.head_slot())); // Assert that the block root iterator reaches the head. 
assert_eq!( harness @@ -2533,7 +2610,7 @@ fn check_iterators(harness: &TestHarness) { .last() .map(Result::unwrap) .map(|(_, slot)| slot), - Some(harness.chain.head_info().expect("should get head").slot) + Some(harness.head_slot()) ); } diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 626c132d69..1e51b0ffb9 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -46,15 +46,8 @@ fn get_valid_sync_committee_message( slot: Slot, relative_sync_committee: RelativeSyncCommittee, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { - let head_state = harness - .chain - .head_beacon_state() - .expect("should get head state"); - let head_block_root = harness - .chain - .head() - .expect("should get head state") - .beacon_block_root; + let head_state = harness.chain.head_beacon_state_cloned(); + let head_block_root = harness.chain.head_snapshot().beacon_block_root; let (signature, _) = harness .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) .get(0) @@ -77,16 +70,9 @@ fn get_valid_sync_contribution( harness: &BeaconChainHarness>, relative_sync_committee: RelativeSyncCommittee, ) -> (SignedContributionAndProof, usize, SecretKey) { - let head_state = harness - .chain - .head_beacon_state() - .expect("should get head state"); + let head_state = harness.chain.head_beacon_state_cloned(); - let head_block_root = harness - .chain - .head() - .expect("should get head state") - .beacon_block_root; + let head_block_root = harness.chain.head_snapshot().beacon_block_root; let sync_contributions = harness.make_sync_contributions( &head_state, head_block_root, @@ -116,7 +102,7 @@ fn get_non_aggregator( harness: &BeaconChainHarness>, slot: Slot, ) -> (usize, SecretKey) { - let state = &harness.chain.head().expect("should get head").beacon_state; + let state 
= &harness.chain.head_snapshot().beacon_state; let sync_subcommittee_size = E::sync_committee_size() .safe_div(SYNC_COMMITTEE_SUBNET_COUNT as usize) .expect("should determine sync subcommittee size"); @@ -162,17 +148,19 @@ fn get_non_aggregator( } /// Tests verification of `SignedContributionAndProof` from the gossip network. -#[test] -fn aggregated_gossip_verification() { +#[tokio::test] +async fn aggregated_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1), Slot::new(2)], - (0..VALIDATOR_COUNT).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1), Slot::new(2)], + (0..VALIDATOR_COUNT).collect::>().as_slice(), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); @@ -406,7 +394,7 @@ fn aggregated_gossip_verification() { valid_aggregate.message.contribution.clone(), None, &non_aggregator_sk, - &harness.chain.head_info().expect("should get head info").fork, + &harness.chain.canonical_head.cached_head().head_fork(), harness.chain.genesis_validators_root, &harness.chain.spec, ) @@ -474,6 +462,7 @@ fn aggregated_gossip_verification() { harness .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[]) + .await .expect("should add block"); // **Incorrectly** create a sync contribution using the current sync committee @@ -488,17 +477,19 @@ fn aggregated_gossip_verification() { } /// Tests the verification conditions for sync committee messages on the gossip network. 
-#[test] -fn unaggregated_gossip_verification() { +#[tokio::test] +async fn unaggregated_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1), Slot::new(2)], - (0..VALIDATOR_COUNT).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1), Slot::new(2)], + (0..VALIDATOR_COUNT).collect::>().as_slice(), + ) + .await; let current_slot = harness.chain.slot().expect("should get slot"); @@ -648,6 +639,7 @@ fn unaggregated_gossip_verification() { harness .add_attested_block_at_slot(target_slot, state, Hash256::zero(), &[]) + .await .expect("should add block"); // **Incorrectly** create a sync message using the current sync committee diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 7b17937a21..f7d443748d 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,14 +6,17 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, OP_POOL_DB_KEY, }, - StateSkipConfig, WhenSlotSkipped, + BeaconChain, StateSkipConfig, WhenSlotSkipped, }; +use fork_choice::CountUnrealized; use lazy_static::lazy_static; use operation_pool::PersistedOperationPool; use state_processing::{ per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError, }; -use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot}; +use types::{ + BeaconState, BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot, +}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 24; @@ -40,7 +43,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness = harness .chain @@ -122,7 +127,7 @@ fn iterators() { ) }); - let head = &harness.chain.head().expect("should get head"); + let head = harness.chain.head_snapshot(); assert_eq!( *block_roots.last().expect("should have some block roots"), @@ -137,20 +142,44 @@ fn iterators() { ); } -#[test] -fn find_reorgs() { +fn find_reorg_slot( + chain: &BeaconChain>, + new_state: &BeaconState, + new_block_root: Hash256, +) -> Slot { + let (old_state, old_block_root) = { + let head = chain.canonical_head.cached_head(); + let old_state = head.snapshot.beacon_state.clone(); + let old_block_root = head.head_block_root(); + (old_state, old_block_root) + }; + beacon_chain::canonical_head::find_reorg_slot( + &old_state, + old_block_root, + new_state, + new_block_root, + &chain.spec, + ) + .unwrap() +} + +#[tokio::test] +async fn find_reorgs() { let num_blocks_produced = MinimalEthSpec::slots_per_historical_root() + 1; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - // No need to produce attestations for this test. - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + // No need to produce attestations for this test. + AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let head_state = harness.chain.head_beacon_state().unwrap(); + let head = harness.chain.head_snapshot(); + let head_state = &head.beacon_state; let head_slot = head_state.slot(); let genesis_state = harness .chain @@ -160,10 +189,11 @@ fn find_reorgs() { // because genesis is more than `SLOTS_PER_HISTORICAL_ROOT` away, this should return with the // finalized slot. 
assert_eq!( - harness - .chain - .find_reorg_slot(&genesis_state, harness.chain.genesis_block_root) - .unwrap(), + find_reorg_slot( + &harness.chain, + &genesis_state, + harness.chain.genesis_block_root + ), head_state .finalized_checkpoint() .epoch @@ -172,13 +202,11 @@ fn find_reorgs() { // test head assert_eq!( - harness - .chain - .find_reorg_slot( - &head_state, - harness.chain.head_beacon_block().unwrap().canonical_root() - ) - .unwrap(), + find_reorg_slot( + &harness.chain, + &head_state, + harness.chain.head_beacon_block().canonical_root() + ), head_slot ); @@ -194,16 +222,13 @@ fn find_reorgs() { .unwrap() .unwrap(); assert_eq!( - harness - .chain - .find_reorg_slot(&prev_state, prev_block_root) - .unwrap(), + find_reorg_slot(&harness.chain, &prev_state, prev_block_root), prev_slot ); } -#[test] -fn chooses_fork() { +#[tokio::test] +async fn chooses_fork() { let harness = get_harness(VALIDATOR_COUNT); let two_thirds = (VALIDATOR_COUNT / 3) * 2; @@ -217,22 +242,27 @@ fn chooses_fork() { let faulty_fork_blocks = delay + 2; // Build an initial chain where all validators agree. 
- harness.extend_chain( - initial_blocks, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + initial_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block( - &honest_validators, - &faulty_validators, - honest_fork_blocks, - faulty_fork_blocks, - ); + let (honest_head, faulty_head) = harness + .generate_two_forks_by_skipping_a_block( + &honest_validators, + &faulty_validators, + honest_fork_blocks, + faulty_fork_blocks, + ) + .await; assert_ne!(honest_head, faulty_head, "forks should be distinct"); - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -241,29 +271,28 @@ fn chooses_fork() { ); assert_eq!( - harness - .chain - .head() - .expect("should get head") - .beacon_block_root, + harness.chain.head_snapshot().beacon_block_root, honest_head, "the honest chain should be the canonical chain" ); } -#[test] -fn finalizes_with_full_participation() { +#[tokio::test] +async fn finalizes_with_full_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -287,8 +316,8 @@ fn finalizes_with_full_participation() { ); } -#[test] -fn finalizes_with_two_thirds_participation() { +#[tokio::test] +async fn finalizes_with_two_thirds_participation() { let 
num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -296,13 +325,16 @@ fn finalizes_with_two_thirds_participation() { let two_thirds = (VALIDATOR_COUNT / 3) * 2; let attesters = (0..two_thirds).collect(); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(attesters), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -331,8 +363,8 @@ fn finalizes_with_two_thirds_participation() { ); } -#[test] -fn does_not_finalize_with_less_than_two_thirds_participation() { +#[tokio::test] +async fn does_not_finalize_with_less_than_two_thirds_participation() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -341,13 +373,16 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { let less_than_two_thirds = two_thirds - 1; let attesters = (0..less_than_two_thirds).collect(); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(attesters), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -371,19 +406,22 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { ); } -#[test] -fn does_not_finalize_without_attestation() { +#[tokio::test] +async fn does_not_finalize_without_attestation() { let num_blocks_produced = 
MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; assert_eq!( state.slot(), @@ -407,18 +445,20 @@ fn does_not_finalize_without_attestation() { ); } -#[test] -fn roundtrip_operation_pool() { +#[tokio::test] +async fn roundtrip_operation_pool() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); // Add some attestations - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; assert!(harness.chain.op_pool.num_attestations() > 0); // TODO: could add some other operations @@ -439,25 +479,28 @@ fn roundtrip_operation_pool() { assert_eq!(harness.chain.op_pool, restored_op_pool); } -#[test] -fn unaggregated_attestations_added_to_fork_choice_some_none() { +#[tokio::test] +async fn unaggregated_attestations_added_to_fork_choice_some_none() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; - let mut fork_choice = 
harness.chain.fork_choice.write(); + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let mut fork_choice = harness.chain.canonical_head.fork_choice_write_lock(); // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap()) + .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) .unwrap(); let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) @@ -493,8 +536,8 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() { } } -#[test] -fn attestations_with_increasing_slots() { +#[tokio::test] +async fn attestations_with_increasing_slots() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); @@ -502,14 +545,16 @@ fn attestations_with_increasing_slots() { let mut attestations = vec![]; for _ in 0..num_blocks_produced { - harness.extend_chain( - 2, - BlockStrategy::OnCanonicalHead, - // Don't produce & include any attestations (we'll collect them later). - AttestationStrategy::SomeValidators(vec![]), - ); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + // Don't produce & include any attestations (we'll collect them later). 
+ AttestationStrategy::SomeValidators(vec![]), + ) + .await; - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); let head_state_root = head.beacon_state_root(); attestations.extend(harness.get_unaggregated_attestations( @@ -548,25 +593,28 @@ fn attestations_with_increasing_slots() { } } -#[test] -fn unaggregated_attestations_added_to_fork_choice_all_updated() { +#[tokio::test] +async fn unaggregated_attestations_added_to_fork_choice_all_updated() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; let harness = get_harness(VALIDATOR_COUNT); - harness.extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; - let state = &harness.chain.head().expect("should get head").beacon_state; - let mut fork_choice = harness.chain.fork_choice.write(); + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let mut fork_choice = harness.chain.canonical_head.fork_choice_write_lock(); // Move forward a slot so all queued attestations can be processed. harness.advance_slot(); fork_choice - .update_time(harness.chain.slot().unwrap()) + .update_time(harness.chain.slot().unwrap(), &harness.chain.spec) .unwrap(); let validators: Vec = (0..VALIDATOR_COUNT).collect(); @@ -605,7 +653,7 @@ fn unaggregated_attestations_added_to_fork_choice_all_updated() { } } -fn run_skip_slot_test(skip_slots: u64) { +async fn run_skip_slot_test(skip_slots: u64) { let num_validators = 8; let harness_a = get_harness(num_validators); let harness_b = get_harness(num_validators); @@ -615,29 +663,21 @@ fn run_skip_slot_test(skip_slots: u64) { harness_b.advance_slot(); } - harness_a.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - // No attestation required for test. 
- AttestationStrategy::SomeValidators(vec![]), - ); + harness_a + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + // No attestation required for test. + AttestationStrategy::SomeValidators(vec![]), + ) + .await; assert_eq!( - harness_a - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_a.chain.head_snapshot().beacon_block.slot(), Slot::new(skip_slots + 1) ); assert_eq!( - harness_b - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_b.chain.head_snapshot().beacon_block.slot(), Slot::new(0) ); @@ -645,53 +685,37 @@ fn run_skip_slot_test(skip_slots: u64) { harness_b .chain .process_block( - harness_a - .chain - .head() - .expect("should get head") - .beacon_block - .clone(), + harness_a.chain.head_snapshot().beacon_block.clone(), + CountUnrealized::True ) + .await .unwrap(), - harness_a - .chain - .head() - .expect("should get head") - .beacon_block_root + harness_a.chain.head_snapshot().beacon_block_root ); - harness_b - .chain - .fork_choice() - .expect("should run fork choice"); + harness_b.chain.recompute_head_at_current_slot().await; assert_eq!( - harness_b - .chain - .head() - .expect("should get head") - .beacon_block - .slot(), + harness_b.chain.head_snapshot().beacon_block.slot(), Slot::new(skip_slots + 1) ); } -#[test] -fn produces_and_processes_with_genesis_skip_slots() { +#[tokio::test] +async fn produces_and_processes_with_genesis_skip_slots() { for i in 0..MinimalEthSpec::slots_per_epoch() * 4 { - run_skip_slot_test(i) + run_skip_slot_test(i).await } } -#[test] -fn block_roots_skip_slot_behaviour() { +#[tokio::test] +async fn block_roots_skip_slot_behaviour() { let harness = get_harness(VALIDATOR_COUNT); // Test should be longer than the block roots to ensure a DB lookup is triggered. 
let chain_length = harness .chain - .head() - .unwrap() + .head_snapshot() .beacon_state .block_roots() .len() as u64 @@ -708,11 +732,13 @@ fn block_roots_skip_slot_behaviour() { let slot = harness.chain.slot().unwrap().as_u64(); if !skipped_slots.contains(&slot) { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } } @@ -820,7 +846,7 @@ fn block_roots_skip_slot_behaviour() { let future_slot = harness.chain.slot().unwrap() + 1; assert_eq!( - harness.chain.head().unwrap().beacon_block.slot(), + harness.chain.head_snapshot().beacon_block.slot(), future_slot - 2, "test precondition" ); diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml new file mode 100644 index 0000000000..c4d21c59ab --- /dev/null +++ b/beacon_node/builder_client/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "builder_client" +version = "0.1.0" +edition = "2021" +authors = ["Sean Anderson "] + +[dependencies] +reqwest = { version = "0.11.0", features = ["json","stream"] } +sensitive_url = { path = "../../common/sensitive_url" } +eth2 = { path = "../../common/eth2" } +serde = { version = "1.0.116", features = ["derive"] } +serde_json = "1.0.58" \ No newline at end of file diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs new file mode 100644 index 0000000000..3517d06b15 --- /dev/null +++ b/beacon_node/builder_client/src/lib.rs @@ -0,0 +1,204 @@ +use eth2::types::builder_bid::SignedBuilderBid; +use eth2::types::{ + BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, + ForkVersionedResponse, PublicKeyBytes, SignedBeaconBlock, SignedValidatorRegistrationData, + Slot, +}; +pub use eth2::Error; +use eth2::{ok_or_error, StatusCode}; +use reqwest::{IntoUrl, Response}; +use sensitive_url::SensitiveUrl; +use serde::de::DeserializeOwned; 
+use serde::Serialize; +use std::time::Duration; + +pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; + +/// This timeout is in accordance with v0.2.0 of the [builder specs](https://github.com/flashbots/mev-boost/pull/20). +pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000; + +#[derive(Clone)] +pub struct Timeouts { + get_header: Duration, + post_validators: Duration, + post_blinded_blocks: Duration, + get_builder_status: Duration, +} + +impl Default for Timeouts { + fn default() -> Self { + Self { + get_header: Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS), + post_validators: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), + post_blinded_blocks: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), + get_builder_status: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), + } + } +} + +#[derive(Clone)] +pub struct BuilderHttpClient { + client: reqwest::Client, + server: SensitiveUrl, + timeouts: Timeouts, +} + +impl BuilderHttpClient { + pub fn new(server: SensitiveUrl) -> Result { + Ok(Self { + client: reqwest::Client::new(), + server, + timeouts: Timeouts::default(), + }) + } + + pub fn new_with_timeouts(server: SensitiveUrl, timeouts: Timeouts) -> Result { + Ok(Self { + client: reqwest::Client::new(), + server, + timeouts, + }) + } + + async fn get_with_timeout( + &self, + url: U, + timeout: Duration, + ) -> Result { + self.get_response_with_timeout(url, Some(timeout)) + .await? + .json() + .await + .map_err(Error::Reqwest) + } + + /// Perform a HTTP GET request, returning the `Response` for further processing. + async fn get_response_with_timeout( + &self, + url: U, + timeout: Option, + ) -> Result { + let mut builder = self.client.get(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.send().await.map_err(Error::Reqwest)?; + ok_or_error(response).await + } + + /// Generic POST function supporting arbitrary responses and timeouts. 
+ async fn post_generic( + &self, + url: U, + body: &T, + timeout: Option, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.json(body).send().await?; + ok_or_error(response).await + } + + async fn post_with_raw_response( + &self, + url: U, + body: &T, + timeout: Option, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.json(body).send().await.map_err(Error::Reqwest)?; + ok_or_error(response).await + } + + /// `POST /eth/v1/builder/validators` + pub async fn post_builder_validators( + &self, + validator: &[SignedValidatorRegistrationData], + ) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("validators"); + + self.post_generic(path, &validator, Some(self.timeouts.post_validators)) + .await?; + Ok(()) + } + + /// `POST /eth/v1/builder/blinded_blocks` + pub async fn post_builder_blinded_blocks( + &self, + blinded_block: &SignedBeaconBlock>, + ) -> Result>, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("blinded_blocks"); + + Ok(self + .post_with_raw_response( + path, + &blinded_block, + Some(self.timeouts.post_blinded_blocks), + ) + .await? + .json() + .await?) + } + + /// `GET /eth/v1/builder/header` + pub async fn get_builder_header>( + &self, + slot: Slot, + parent_hash: ExecutionBlockHash, + pubkey: &PublicKeyBytes, + ) -> Result>>, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("eth") + .push("v1") + .push("builder") + .push("header") + .push(slot.to_string().as_str()) + .push(format!("{parent_hash:?}").as_str()) + .push(pubkey.as_hex_string().as_str()); + + let resp = self.get_with_timeout(path, self.timeouts.get_header).await; + + if matches!(resp, Err(Error::StatusCode(StatusCode::NO_CONTENT))) { + Ok(None) + } else { + resp.map(Some) + } + } + + /// `GET /eth/v1/builder/status` + pub async fn get_builder_status(&self) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("status"); + + self.get_with_timeout(path, self.timeouts.get_builder_status) + .await + } +} diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3079d7744e..d01f2505cc 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2021" [dev-dependencies] -toml = "0.5.6" +serde_yaml = "0.8.13" [dependencies] beacon_chain = { path = "../beacon_chain" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 59f1bebdb4..752ba3b7bc 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -1,6 +1,7 @@ use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; +use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ @@ -21,7 +22,7 @@ use execution_layer::ExecutionLayer; use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; -use network::{NetworkConfig, NetworkMessage, 
NetworkService}; +use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; @@ -30,7 +31,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; +use tokio::sync::oneshot; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, SignedBeaconBlock, @@ -65,7 +66,7 @@ pub struct ClientBuilder { beacon_chain: Option>>, eth1_service: Option, network_globals: Option>>, - network_send: Option>>, + network_senders: Option>, gossipsub_registry: Option, db_path: Option, freezer_db_path: Option, @@ -97,7 +98,7 @@ where beacon_chain: None, eth1_service: None, network_globals: None, - network_send: None, + network_senders: None, gossipsub_registry: None, db_path: None, freezer_db_path: None, @@ -276,6 +277,8 @@ where BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT)); let slots_per_epoch = TEthSpec::slots_per_epoch(); + debug!(context.log(), "Downloading finalized block"); + // Find a suitable finalized block on an epoch boundary. let mut block = remote .get_beacon_blocks_ssz::(BlockId::Finalized, &spec) @@ -290,6 +293,8 @@ where })? 
.ok_or("Finalized block missing from remote, it returned 404")?; + debug!(context.log(), "Downloaded finalized block"); + let mut block_slot = block.slot(); while block.slot() % slots_per_epoch != 0 { @@ -301,6 +306,12 @@ where "block_slot" => block_slot, ); + debug!( + context.log(), + "Searching for aligned checkpoint block"; + "block_slot" => block_slot + ); + if let Some(found_block) = remote .get_beacon_blocks_ssz::(BlockId::Slot(block_slot), &spec) .await @@ -312,7 +323,19 @@ where } } + debug!( + context.log(), + "Downloaded aligned finalized block"; + "block_root" => ?block.canonical_root(), + "block_slot" => block.slot(), + ); + let state_root = block.state_root(); + debug!( + context.log(), + "Downloading finalized state"; + "state_root" => ?state_root + ); let state = remote .get_debug_beacon_states_ssz::(StateId::Root(state_root), &spec) .await @@ -326,6 +349,8 @@ where format!("Checkpoint state missing from remote: {:?}", state_root) })?; + debug!(context.log(), "Downloaded finalized state"); + let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec) .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?; @@ -372,7 +397,7 @@ where > = Arc::new(http_api::Context { config: self.http_api_config.clone(), chain: None, - network_tx: None, + network_senders: None, network_globals: None, eth1_service: Some(genesis_service.eth1_service.clone()), log: context.log().clone(), @@ -456,7 +481,7 @@ where None }; - let (network_globals, network_send) = NetworkService::start( + let (network_globals, network_senders) = NetworkService::start( beacon_chain, config, context.executor, @@ -468,7 +493,7 @@ where .map_err(|e| format!("Failed to start network: {:?}", e))?; self.network_globals = Some(network_globals); - self.network_send = Some(network_send); + self.network_senders = Some(network_senders); self.gossipsub_registry = gossipsub_registry; Ok(self) @@ -485,13 +510,8 @@ where .beacon_chain .clone() .ok_or("node timer requires a 
beacon chain")?; - let seconds_per_slot = self - .chain_spec - .as_ref() - .ok_or("node timer requires a chain spec")? - .seconds_per_slot; - spawn_timer(context.executor, beacon_chain, seconds_per_slot) + spawn_timer(context.executor, beacon_chain) .map_err(|e| format!("Unable to start node timer: {}", e))?; Ok(self) @@ -517,16 +537,16 @@ where .beacon_chain .clone() .ok_or("slasher service requires a beacon chain")?; - let network_send = self - .network_send + let network_senders = self + .network_senders .clone() - .ok_or("slasher service requires a network sender")?; + .ok_or("slasher service requires network senders")?; let context = self .runtime_context .as_ref() .ok_or("slasher requires a runtime_context")? .service_context("slasher_service_ctxt".into()); - SlasherService::new(beacon_chain, network_send).run(&context.executor) + SlasherService::new(beacon_chain, network_senders.network_send()).run(&context.executor) } /// Start the explorer client which periodically sends beacon @@ -596,7 +616,7 @@ where let ctx = Arc::new(http_api::Context { config: self.http_api_config.clone(), chain: self.beacon_chain.clone(), - network_tx: self.network_send.clone(), + network_senders: self.network_senders.clone(), network_globals: self.network_globals.clone(), eth1_service: self.eth1_service.clone(), log: log.clone(), @@ -665,26 +685,20 @@ where if let Some(execution_layer) = beacon_chain.execution_layer.as_ref() { // Only send a head update *after* genesis. if let Ok(current_slot) = beacon_chain.slot() { - let head = beacon_chain - .head_info() - .map_err(|e| format!("Unable to read beacon chain head: {:?}", e))?; - - // Issue the head to the execution engine on startup. This ensures it can start - // syncing. 
- if head - .execution_payload_block_hash - .map_or(false, |h| h != ExecutionBlockHash::zero()) + let params = beacon_chain + .canonical_head + .cached_head() + .forkchoice_update_parameters(); + if params + .head_hash + .map_or(false, |hash| hash != ExecutionBlockHash::zero()) { - // Spawn a new task using the "async" fork choice update method, rather than - // using the "blocking" method. - // - // Using the blocking method may cause a panic if this code is run inside an - // async context. + // Spawn a new task to update the EE without waiting for it to complete. let inner_chain = beacon_chain.clone(); runtime_context.executor.spawn( async move { let result = inner_chain - .update_execution_engine_forkchoice_async(current_slot) + .update_execution_engine_forkchoice(current_slot, params) .await; // No need to exit early if setting the head fails. It will be set again if/when the @@ -705,7 +719,7 @@ where execution_layer.spawn_watchdog_routine(beacon_chain.slot_clock.clone()); // Spawn a routine that removes expired proposer preparations. - execution_layer.spawn_clean_proposer_caches_routine::( + execution_layer.spawn_clean_proposer_caches_routine::( beacon_chain.slot_clock.clone(), ); @@ -715,6 +729,7 @@ where } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); + start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone()); } Ok(Client { @@ -792,8 +807,16 @@ where self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); + let inner_spec = spec.clone(); let schema_upgrade = |db, from, to| { - migrate_schema::>(db, datadir, from, to, log) + migrate_schema::>( + db, + datadir, + from, + to, + log, + &inner_spec, + ) }; let store = HotColdDB::open( @@ -828,7 +851,7 @@ where .runtime_context .as_ref() .ok_or("caching_eth1_backend requires a runtime_context")? 
- .service_context("eth1_rpc".into()); + .service_context("deposit_contract_rpc".into()); let beacon_chain_builder = self .beacon_chain_builder .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 13614af12e..a5d5b37c7a 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -10,7 +10,7 @@ use types::{Graffiti, PublicKeyBytes}; const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; /// Defines how the client should initialize the `BeaconChain` and other components. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub enum ClientGenesis { /// Creates a genesis state as per the 2019 Canada interop specifications. Interop { @@ -21,6 +21,7 @@ pub enum ClientGenesis { FromStore, /// Connects to an eth1 node and waits until it can create the genesis state from the deposit /// contract. + #[default] DepositContract, /// Loads the genesis state from SSZ-encoded `BeaconState` bytes. /// @@ -38,12 +39,6 @@ pub enum ClientGenesis { }, } -impl Default for ClientGenesis { - fn default() -> Self { - Self::DepositContract - } -} - /// The core configuration of a Lighthouse beacon node. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -198,7 +193,8 @@ mod tests { #[test] fn serde() { let config = Config::default(); - let serialized = toml::to_string(&config).expect("should serde encode default config"); - toml::from_str::(&serialized).expect("should serde decode default config"); + let serialized = + serde_yaml::to_string(&config).expect("should serde encode default config"); + serde_yaml::from_str::(&serialized).expect("should serde decode default config"); } } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 22c3bfcb3a..1da7a79707 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,13 +1,16 @@ use crate::metrics; -use beacon_chain::{BeaconChain, BeaconChainTypes, HeadSafetyStatus}; +use beacon_chain::{ + merge_readiness::{MergeConfig, MergeReadiness}, + BeaconChain, BeaconChainTypes, ExecutionStatus, +}; use lighthouse_network::{types::SyncState, NetworkGlobals}; -use parking_lot::Mutex; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::{Duration, Instant}; +use tokio::sync::Mutex; use tokio::time::sleep; -use types::{EthSpec, Slot}; +use types::*; /// Create a warning log whenever the peer count is at or below this value. pub const WARN_PEER_COUNT: usize = 1; @@ -30,20 +33,9 @@ pub fn spawn_notifier( seconds_per_slot: u64, ) -> Result<(), String> { let slot_duration = Duration::from_secs(seconds_per_slot); - let duration_to_next_slot = beacon_chain - .slot_clock - .duration_to_next_slot() - .ok_or("slot_notifier unable to determine time to next slot")?; - - // Run this half way through each slot. - let start_instant = tokio::time::Instant::now() + duration_to_next_slot + (slot_duration / 2); - - // Run this each slot. 
- let interval_duration = slot_duration; let speedo = Mutex::new(Speedo::default()); let log = executor.log().clone(); - let mut interval = tokio::time::interval_at(start_instant, interval_duration); // Keep track of sync state and reset the speedo on specific sync state changes. // Specifically, if we switch between a sync and a backfill sync, reset the speedo. @@ -77,8 +69,22 @@ pub fn spawn_notifier( // Perform post-genesis logging. let mut last_backfill_log_slot = None; + loop { - interval.tick().await; + // Run the notifier half way through each slot. + // + // Keep remeasuring the offset rather than using an interval, so that we can correct + // for system time clock adjustments. + let wait = match beacon_chain.slot_clock.duration_to_next_slot() { + Some(duration) => duration + slot_duration / 2, + None => { + warn!(log, "Unable to read current slot"); + sleep(slot_duration).await; + continue; + } + }; + sleep(wait).await; + let connected_peer_count = network.connected_peers(); let sync_state = network.sync_state(); @@ -87,12 +93,12 @@ pub fn spawn_notifier( match (current_sync_state, &sync_state) { (_, SyncState::BackFillSyncing { .. }) => { // We have transitioned to a backfill sync. Reset the speedo. - let mut speedo = speedo.lock(); + let mut speedo = speedo.lock().await; speedo.clear(); } (SyncState::BackFillSyncing { .. 
}, _) => { // We have transitioned from a backfill sync, reset the speedo - let mut speedo = speedo.lock(); + let mut speedo = speedo.lock().await; speedo.clear(); } (_, _) => {} @@ -100,15 +106,10 @@ pub fn spawn_notifier( current_sync_state = sync_state; } - let head_info = match beacon_chain.head_info() { - Ok(head_info) => head_info, - Err(e) => { - error!(log, "Failed to get beacon chain head info"; "error" => format!("{:?}", e)); - break; - } - }; - - let head_slot = head_info.slot; + let cached_head = beacon_chain.canonical_head.cached_head(); + let head_slot = cached_head.head_slot(); + let head_root = cached_head.head_block_root(); + let finalized_checkpoint = cached_head.finalized_checkpoint(); metrics::set_gauge(&metrics::NOTIFIER_HEAD_SLOT, head_slot.as_u64() as i64); @@ -125,15 +126,12 @@ pub fn spawn_notifier( }; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let finalized_epoch = head_info.finalized_checkpoint.epoch; - let finalized_root = head_info.finalized_checkpoint.root; - let head_root = head_info.block_root; // The default is for regular sync but this gets modified if backfill sync is in // progress. let mut sync_distance = current_slot - head_slot; - let mut speedo = speedo.lock(); + let mut speedo = speedo.lock().await; match current_sync_state { SyncState::BackFillSyncing { .. } => { // Observe backfilling sync info. 
@@ -177,8 +175,8 @@ pub fn spawn_notifier( log, "Slot timer"; "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "head_block" => format!("{}", head_root), "head_slot" => head_slot, "current_slot" => current_slot, @@ -264,35 +262,29 @@ pub fn spawn_notifier( head_root.to_string() }; - let block_hash = match beacon_chain.head_safety_status() { - Ok(HeadSafetyStatus::Safe(hash_opt)) => hash_opt - .map(|hash| format!("{} (verified)", hash)) - .unwrap_or_else(|| "n/a".to_string()), - Ok(HeadSafetyStatus::Unsafe(block_hash)) => { + let block_hash = match beacon_chain.canonical_head.head_execution_status() { + Ok(ExecutionStatus::Irrelevant(_)) => "n/a".to_string(), + Ok(ExecutionStatus::Valid(hash)) => format!("{} (verified)", hash), + Ok(ExecutionStatus::Optimistic(hash)) => { warn!( log, - "Head execution payload is unverified"; - "execution_block_hash" => ?block_hash, + "Head is optimistic"; + "info" => "chain not fully verified, \ + block and attestation production disabled until execution engine syncs", + "execution_block_hash" => ?hash, ); - format!("{} (unverified)", block_hash) + format!("{} (unverified)", hash) } - Ok(HeadSafetyStatus::Invalid(block_hash)) => { + Ok(ExecutionStatus::Invalid(hash)) => { crit!( log, "Head execution payload is invalid"; "msg" => "this scenario may be unrecoverable", - "execution_block_hash" => ?block_hash, + "execution_block_hash" => ?hash, ); - format!("{} (invalid)", block_hash) - } - Err(e) => { - error!( - log, - "Failed to read head safety status"; - "error" => ?e - ); - "n/a".to_string() + format!("{} (invalid)", hash) } + Err(_) => "unknown".to_string(), }; info!( @@ -300,8 +292,8 @@ pub fn spawn_notifier( "Synced"; "peers" => peer_count_pretty(connected_peer_count), "exec_hash" => block_hash, - "finalized_root" => 
format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "epoch" => current_epoch, "block" => block_info, "slot" => current_slot, @@ -312,14 +304,15 @@ pub fn spawn_notifier( log, "Searching for peers"; "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, + "finalized_root" => format!("{}", finalized_checkpoint.root), + "finalized_epoch" => finalized_checkpoint.epoch, "head_slot" => head_slot, "current_slot" => current_slot, ); } eth1_logging(&beacon_chain, &log); + merge_readiness_logging(current_slot, &beacon_chain, &log).await; } }; @@ -329,60 +322,152 @@ pub fn spawn_notifier( Ok(()) } +/// Provides some helpful logging to users to indicate if their node is ready for the Bellatrix +/// fork and subsequent merge transition. +async fn merge_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let merge_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_block + .message() + .body() + .execution_payload() + .map_or(false, |payload| { + payload.parent_hash() != ExecutionBlockHash::zero() + }); + + let has_execution_layer = beacon_chain.execution_layer.is_some(); + + if merge_completed && has_execution_layer + || !beacon_chain.is_time_to_prepare_for_bellatrix(current_slot) + { + return; + } + + if merge_completed && !has_execution_layer { + error!( + log, + "Execution endpoint required"; + "info" => "you need an execution engine to validate blocks, see: \ + https://lighthouse-book.sigmaprime.io/merge-migration.html" + ); + return; + } + + match beacon_chain.check_merge_readiness().await { + MergeReadiness::Ready { + config, + current_difficulty, + } => match config { + MergeConfig { + terminal_total_difficulty: Some(ttd), + terminal_block_hash: None, + terminal_block_hash_epoch: 
None, + } => { + info!( + log, + "Ready for the merge"; + "terminal_total_difficulty" => %ttd, + "current_difficulty" => current_difficulty + .map(|d| d.to_string()) + .unwrap_or_else(|| "??".into()), + ) + } + MergeConfig { + terminal_total_difficulty: _, + terminal_block_hash: Some(terminal_block_hash), + terminal_block_hash_epoch: Some(terminal_block_hash_epoch), + } => { + info!( + log, + "Ready for the merge"; + "info" => "you are using override parameters, please ensure that you \ + understand these parameters and their implications.", + "terminal_block_hash" => ?terminal_block_hash, + "terminal_block_hash_epoch" => ?terminal_block_hash_epoch, + ) + } + other => error!( + log, + "Inconsistent merge configuration"; + "config" => ?other + ), + }, + readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed { error: _ } => { + error!( + log, + "Not ready for merge"; + "info" => %readiness, + "hint" => "try updating Lighthouse and/or the execution layer", + ) + } + readiness @ MergeReadiness::NotSynced => warn!( + log, + "Not ready for merge"; + "info" => %readiness, + ), + readiness @ MergeReadiness::NoExecutionEndpoint => warn!( + log, + "Not ready for merge"; + "info" => %readiness, + ), + } +} + fn eth1_logging(beacon_chain: &BeaconChain, log: &Logger) { let current_slot_opt = beacon_chain.slot().ok(); - if let Ok(head_info) = beacon_chain.head_info() { - // Perform some logging about the eth1 chain - if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { - // No need to do logging if using the dummy backend. 
- if eth1_chain.is_dummy_backend() { - return; - } - - if let Some(status) = - eth1_chain.sync_status(head_info.genesis_time, current_slot_opt, &beacon_chain.spec) - { - debug!( - log, - "Eth1 cache sync status"; - "eth1_head_block" => status.head_block_number, - "latest_cached_block_number" => status.latest_cached_block_number, - "latest_cached_timestamp" => status.latest_cached_block_timestamp, - "voting_target_timestamp" => status.voting_target_timestamp, - "ready" => status.lighthouse_is_cached_and_ready - ); - - if !status.lighthouse_is_cached_and_ready { - let voting_target_timestamp = status.voting_target_timestamp; - - let distance = status - .latest_cached_block_timestamp - .map(|latest| { - voting_target_timestamp.saturating_sub(latest) - / beacon_chain.spec.seconds_per_eth1_block - }) - .map(|distance| distance.to_string()) - .unwrap_or_else(|| "initializing deposits".to_string()); - - warn!( - log, - "Syncing eth1 block cache"; - "est_blocks_remaining" => distance, - ); - } - } else { - error!( - log, - "Unable to determine eth1 sync status"; - ); - } + // Perform some logging about the eth1 chain + if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { + // No need to do logging if using the dummy backend. 
+ if eth1_chain.is_dummy_backend() { + return; + } + + if let Some(status) = eth1_chain.sync_status( + beacon_chain.genesis_time, + current_slot_opt, + &beacon_chain.spec, + ) { + debug!( + log, + "Eth1 cache sync status"; + "eth1_head_block" => status.head_block_number, + "latest_cached_block_number" => status.latest_cached_block_number, + "latest_cached_timestamp" => status.latest_cached_block_timestamp, + "voting_target_timestamp" => status.voting_target_timestamp, + "ready" => status.lighthouse_is_cached_and_ready + ); + + if !status.lighthouse_is_cached_and_ready { + let voting_target_timestamp = status.voting_target_timestamp; + + let distance = status + .latest_cached_block_timestamp + .map(|latest| { + voting_target_timestamp.saturating_sub(latest) + / beacon_chain.spec.seconds_per_eth1_block + }) + .map(|distance| distance.to_string()) + .unwrap_or_else(|| "initializing deposits".to_string()); + + warn!( + log, + "Syncing deposit contract block cache"; + "est_blocks_remaining" => distance, + ); + } + } else { + error!( + log, + "Unable to determine deposit contract sync status"; + ); } - } else { - error!( - log, - "Unable to get head info"; - ); } } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index ecf3c19e30..403869cc9c 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -6,13 +6,14 @@ edition = "2021" [dev-dependencies] eth1_test_rig = { path = "../../testing/eth1_test_rig" } -toml = "0.5.6" +serde_yaml = "0.8.13" web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } sloggers = { version = "2.1.1", features = ["json"] } environment = { path = "../../lighthouse/environment" } [dependencies] reqwest = { version = "0.11.0", features = ["native-tls-vendored"] } +execution_layer = { path = "../execution_layer" } futures = "0.3.7" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } diff --git 
a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index 7c67893fb3..078e3602f5 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -1,4 +1,4 @@ -use crate::DepositLog; +use execution_layer::http::deposit_log::DepositLog; use ssz_derive::{Decode, Encode}; use state_processing::common::DepositDataTree; use std::cmp::Ordering; @@ -297,12 +297,37 @@ impl DepositCache { #[cfg(test)] pub mod tests { use super::*; - use crate::deposit_log::tests::EXAMPLE_LOG; - use crate::http::Log; + use execution_layer::http::deposit_log::Log; use types::{EthSpec, MainnetEthSpec}; pub const TREE_DEPTH: usize = 32; + /// The data from a deposit event, using the v0.8.3 version of the deposit contract. + pub const EXAMPLE_LOG: &[u8] = &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82, + 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239, + 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, + 30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, + 119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, + 187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, + 149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, + 18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + fn example_log() -> DepositLog { let spec = MainnetEthSpec::default_spec(); diff --git a/beacon_node/eth1/src/deposit_log.rs b/beacon_node/eth1/src/deposit_log.rs deleted file mode 100644 index 1b3cfa01a0..0000000000 --- a/beacon_node/eth1/src/deposit_log.rs +++ /dev/null @@ -1,107 +0,0 @@ -use super::http::Log; -use ssz::Decode; -use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; -use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes}; - -pub use eth2::lighthouse::DepositLog; - -/// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The -/// event bytes are formatted according to the Ethereum ABI. -const PUBKEY_START: usize = 192; -const PUBKEY_LEN: usize = 48; -const CREDS_START: usize = PUBKEY_START + 64 + 32; -const CREDS_LEN: usize = 32; -const AMOUNT_START: usize = CREDS_START + 32 + 32; -const AMOUNT_LEN: usize = 8; -const SIG_START: usize = AMOUNT_START + 32 + 32; -const SIG_LEN: usize = 96; -const INDEX_START: usize = SIG_START + 96 + 32; -const INDEX_LEN: usize = 8; - -impl Log { - /// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`. 
- pub fn to_deposit_log(&self, spec: &ChainSpec) -> Result { - let bytes = &self.data; - - let pubkey = bytes - .get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN) - .ok_or("Insufficient bytes for pubkey")?; - let withdrawal_credentials = bytes - .get(CREDS_START..CREDS_START + CREDS_LEN) - .ok_or("Insufficient bytes for withdrawal credential")?; - let amount = bytes - .get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN) - .ok_or("Insufficient bytes for amount")?; - let signature = bytes - .get(SIG_START..SIG_START + SIG_LEN) - .ok_or("Insufficient bytes for signature")?; - let index = bytes - .get(INDEX_START..INDEX_START + INDEX_LEN) - .ok_or("Insufficient bytes for index")?; - - let deposit_data = DepositData { - pubkey: PublicKeyBytes::from_ssz_bytes(pubkey) - .map_err(|e| format!("Invalid pubkey ssz: {:?}", e))?, - withdrawal_credentials: Hash256::from_ssz_bytes(withdrawal_credentials) - .map_err(|e| format!("Invalid withdrawal_credentials ssz: {:?}", e))?, - amount: u64::from_ssz_bytes(amount) - .map_err(|e| format!("Invalid amount ssz: {:?}", e))?, - signature: SignatureBytes::from_ssz_bytes(signature) - .map_err(|e| format!("Invalid signature ssz: {:?}", e))?, - }; - - let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec) - .map_or(false, |(public_key, signature, msg)| { - signature.verify(&public_key, msg) - }); - - Ok(DepositLog { - deposit_data, - block_number: self.block_number, - index: u64::from_ssz_bytes(index).map_err(|e| format!("Invalid index ssz: {:?}", e))?, - signature_is_valid, - }) - } -} - -#[cfg(test)] -pub mod tests { - use crate::http::Log; - use types::{EthSpec, MainnetEthSpec}; - - /// The data from a deposit event, using the v0.8.3 version of the deposit contract. 
- pub const EXAMPLE_LOG: &[u8] = &[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82, - 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239, - 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, - 30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, - 119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, - 187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, - 149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, - 18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]; - - #[test] - fn can_parse_example_log() { - let log = Log { - block_number: 42, - data: EXAMPLE_LOG.to_vec(), - }; - log.to_deposit_log(&MainnetEthSpec::default_spec()) - .expect("should decode log"); - } -} diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs deleted file mode 100644 index 71b1b5b4b2..0000000000 --- a/beacon_node/eth1/src/http.rs +++ /dev/null @@ -1,489 +0,0 @@ -//! Provides a very minimal set of functions for interfacing with the eth2 deposit contract via an -//! eth1 HTTP JSON-RPC endpoint. -//! -//! All remote functions return a future (i.e., are async). -//! -//! Does not use a web3 library, instead it uses `reqwest` (`hyper`) to call the remote endpoint -//! and `serde` to decode the response. -//! -//! ## Note -//! -//! There is no ABI parsing here, all function signatures and topics are hard-coded as constants. - -use futures::future::TryFutureExt; -use reqwest::{header::CONTENT_TYPE, ClientBuilder, StatusCode}; -use sensitive_url::SensitiveUrl; -use serde::{Deserialize, Serialize}; -use serde_json::{json, Value}; -use std::fmt; -use std::ops::Range; -use std::str::FromStr; -use std::time::Duration; -use types::Hash256; - -/// `keccak("DepositEvent(bytes,bytes,bytes,bytes,bytes)")` -pub const DEPOSIT_EVENT_TOPIC: &str = - "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"; -/// `keccak("get_deposit_root()")[0..4]` -pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0xc5f2892f"; -/// `keccak("get_deposit_count()")[0..4]` -pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130"; - -/// Number of bytes in deposit contract deposit root response. -pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96; -/// Number of bytes in deposit contract deposit root (value only). -pub const DEPOSIT_ROOT_BYTES: usize = 32; - -/// This error is returned during a `chainId` call by Geth. 
-pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; - -/// Represents an eth1 chain/network id. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub enum Eth1Id { - Goerli, - Mainnet, - Custom(u64), -} - -/// Used to identify a block when querying the Eth1 node. -#[derive(Clone, Copy)] -pub enum BlockQuery { - Number(u64), - Latest, -} - -/// Represents an error received from a remote procecdure call. -#[derive(Debug, Serialize, Deserialize)] -pub enum RpcError { - NoResultField, - Eip155Error, - InvalidJson(String), - Error(String), -} - -impl fmt::Display for RpcError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - RpcError::NoResultField => write!(f, "No result field in response"), - RpcError::Eip155Error => write!(f, "Not synced past EIP-155"), - RpcError::InvalidJson(e) => write!(f, "Malformed JSON received: {}", e), - RpcError::Error(s) => write!(f, "{}", s), - } - } -} - -impl From for String { - fn from(e: RpcError) -> String { - e.to_string() - } -} - -impl Into for Eth1Id { - fn into(self) -> u64 { - match self { - Eth1Id::Mainnet => 1, - Eth1Id::Goerli => 5, - Eth1Id::Custom(id) => id, - } - } -} - -impl From for Eth1Id { - fn from(id: u64) -> Self { - let into = |x: Eth1Id| -> u64 { x.into() }; - match id { - id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet, - id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli, - id => Eth1Id::Custom(id), - } - } -} - -impl FromStr for Eth1Id { - type Err = String; - - fn from_str(s: &str) -> Result { - s.parse::() - .map(Into::into) - .map_err(|e| format!("Failed to parse eth1 network id {}", e)) - } -} - -/// Get the eth1 network id of the given endpoint. -pub async fn get_network_id(endpoint: &SensitiveUrl, timeout: Duration) -> Result { - let response_body = send_rpc_request(endpoint, "net_version", json!([]), timeout).await?; - Eth1Id::from_str( - response_result_or_error(&response_body)? 
- .as_str() - .ok_or("Data was not string")?, - ) -} - -/// Get the eth1 chain id of the given endpoint. -pub async fn get_chain_id(endpoint: &SensitiveUrl, timeout: Duration) -> Result { - let response_body: String = - send_rpc_request(endpoint, "eth_chainId", json!([]), timeout).await?; - - match response_result_or_error(&response_body) { - Ok(chain_id) => { - hex_to_u64_be(chain_id.as_str().ok_or("Data was not string")?).map(|id| id.into()) - } - // Geth returns this error when it's syncing lower blocks. Simply map this into `0` since - // Lighthouse does not raise errors for `0`, it simply waits for it to change. - Err(RpcError::Eip155Error) => Ok(Eth1Id::Custom(0)), - Err(e) => Err(e.to_string()), - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct Block { - pub hash: Hash256, - pub timestamp: u64, - pub number: u64, -} - -/// Returns the current block number. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_block_number(endpoint: &SensitiveUrl, timeout: Duration) -> Result { - let response_body = send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout).await?; - hex_to_u64_be( - response_result_or_error(&response_body) - .map_err(|e| format!("eth_blockNumber failed: {}", e))? - .as_str() - .ok_or("Data was not string")?, - ) - .map_err(|e| format!("Failed to get block number: {}", e)) -} - -/// Gets a block hash by block number. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_block( - endpoint: &SensitiveUrl, - query: BlockQuery, - timeout: Duration, -) -> Result { - let query_param = match query { - BlockQuery::Number(block_number) => format!("0x{:x}", block_number), - BlockQuery::Latest => "latest".to_string(), - }; - let params = json!([ - query_param, - false // do not return full tx objects. 
- ]); - - let response_body = send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout).await?; - let response = response_result_or_error(&response_body) - .map_err(|e| format!("eth_getBlockByNumber failed: {}", e))?; - - let hash: Vec = hex_to_bytes( - response - .get("hash") - .ok_or("No hash for block")? - .as_str() - .ok_or("Block hash was not string")?, - )?; - let hash: Hash256 = if hash.len() == 32 { - Hash256::from_slice(&hash) - } else { - return Err(format!("Block has was not 32 bytes: {:?}", hash)); - }; - - let timestamp = hex_to_u64_be( - response - .get("timestamp") - .ok_or("No timestamp for block")? - .as_str() - .ok_or("Block timestamp was not string")?, - )?; - - let number = hex_to_u64_be( - response - .get("number") - .ok_or("No number for block")? - .as_str() - .ok_or("Block number was not string")?, - )?; - - if number <= usize::max_value() as u64 { - Ok(Block { - hash, - timestamp, - number, - }) - } else { - Err(format!("Block number {} is larger than a usize", number)) - } - .map_err(|e| format!("Failed to get block number: {}", e)) -} - -/// Returns the value of the `get_deposit_count()` call at the given `address` for the given -/// `block_number`. -/// -/// Assumes that the `address` has the same ABI as the eth2 deposit contract. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. 
-pub async fn get_deposit_count( - endpoint: &SensitiveUrl, - address: &str, - block_number: u64, - timeout: Duration, -) -> Result, String> { - let result = call( - endpoint, - address, - DEPOSIT_COUNT_FN_SIGNATURE, - block_number, - timeout, - ) - .await?; - match result { - None => Err("Deposit root response was none".to_string()), - Some(bytes) => { - if bytes.is_empty() { - Ok(None) - } else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES { - let mut array = [0; 8]; - array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]); - Ok(Some(u64::from_le_bytes(array))) - } else { - Err(format!( - "Deposit count response was not {} bytes: {:?}", - DEPOSIT_COUNT_RESPONSE_BYTES, bytes - )) - } - } - } -} - -/// Returns the value of the `get_hash_tree_root()` call at the given `block_number`. -/// -/// Assumes that the `address` has the same ABI as the eth2 deposit contract. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_deposit_root( - endpoint: &SensitiveUrl, - address: &str, - block_number: u64, - timeout: Duration, -) -> Result, String> { - let result = call( - endpoint, - address, - DEPOSIT_ROOT_FN_SIGNATURE, - block_number, - timeout, - ) - .await?; - match result { - None => Err("Deposit root response was none".to_string()), - Some(bytes) => { - if bytes.is_empty() { - Ok(None) - } else if bytes.len() == DEPOSIT_ROOT_BYTES { - Ok(Some(Hash256::from_slice(&bytes))) - } else { - Err(format!( - "Deposit root response was not {} bytes: {:?}", - DEPOSIT_ROOT_BYTES, bytes - )) - } - } - } -} - -/// Performs a instant, no-transaction call to the contract `address` with the given `0x`-prefixed -/// `hex_data`. -/// -/// Returns bytes, if any. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -async fn call( - endpoint: &SensitiveUrl, - address: &str, - hex_data: &str, - block_number: u64, - timeout: Duration, -) -> Result>, String> { - let params = json! 
([ - { - "to": address, - "data": hex_data, - }, - format!("0x{:x}", block_number) - ]); - - let response_body = send_rpc_request(endpoint, "eth_call", params, timeout).await?; - - match response_result_or_error(&response_body) { - Ok(result) => { - let hex = result - .as_str() - .map(|s| s.to_string()) - .ok_or("'result' value was not a string")?; - - Ok(Some(hex_to_bytes(&hex)?)) - } - // It's valid for `eth_call` to return without a result. - Err(RpcError::NoResultField) => Ok(None), - Err(e) => Err(format!("eth_call failed: {}", e)), - } -} - -/// A reduced set of fields from an Eth1 contract log. -#[derive(Debug, PartialEq, Clone)] -pub struct Log { - pub(crate) block_number: u64, - pub(crate) data: Vec, -} - -/// Returns logs for the `DEPOSIT_EVENT_TOPIC`, for the given `address` in the given -/// `block_height_range`. -/// -/// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not. -/// -/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. -pub async fn get_deposit_logs_in_range( - endpoint: &SensitiveUrl, - address: &str, - block_height_range: Range, - timeout: Duration, -) -> Result, String> { - let params = json! ([{ - "address": address, - "topics": [DEPOSIT_EVENT_TOPIC], - "fromBlock": format!("0x{:x}", block_height_range.start), - "toBlock": format!("0x{:x}", block_height_range.end), - }]); - - let response_body = send_rpc_request(endpoint, "eth_getLogs", params, timeout).await?; - response_result_or_error(&response_body) - .map_err(|e| format!("eth_getLogs failed: {}", e))? - .as_array() - .cloned() - .ok_or("'result' value was not an array")? - .into_iter() - .map(|value| { - let block_number = value - .get("blockNumber") - .ok_or("No block number field in log")? - .as_str() - .ok_or("Block number was not string")?; - - let data = value - .get("data") - .ok_or("No block number field in log")? 
- .as_str() - .ok_or("Data was not string")?; - - Ok(Log { - block_number: hex_to_u64_be(block_number)?, - data: hex_to_bytes(data)?, - }) - }) - .collect::, String>>() - .map_err(|e| format!("Failed to get logs in range: {}", e)) -} - -/// Sends an RPC request to `endpoint`, using a POST with the given `body`. -/// -/// Tries to receive the response and parse the body as a `String`. -pub async fn send_rpc_request( - endpoint: &SensitiveUrl, - method: &str, - params: Value, - timeout: Duration, -) -> Result { - let body = json! ({ - "jsonrpc": "2.0", - "method": method, - "params": params, - "id": 1 - }) - .to_string(); - - // Note: it is not ideal to create a new client for each request. - // - // A better solution would be to create some struct that contains a built client and pass it - // around (similar to the `web3` crate's `Transport` structs). - let response = ClientBuilder::new() - .timeout(timeout) - .build() - .expect("The builder should always build a client") - .post(endpoint.full.clone()) - .header(CONTENT_TYPE, "application/json") - .body(body) - .send() - .map_err(|e| format!("Request failed: {:?}", e)) - .await?; - if response.status() != StatusCode::OK { - return Err(format!( - "Response HTTP status was not 200 OK: {}.", - response.status() - )); - }; - let encoding = response - .headers() - .get(CONTENT_TYPE) - .ok_or("No content-type header in response")? 
- .to_str() - .map(|s| s.to_string()) - .map_err(|e| format!("Failed to parse content-type header: {}", e))?; - - response - .bytes() - .map_err(|e| format!("Failed to receive body: {:?}", e)) - .await - .and_then(move |bytes| match encoding.as_str() { - "application/json" => Ok(bytes), - "application/json; charset=utf-8" => Ok(bytes), - other => Err(format!("Unsupported encoding: {}", other)), - }) - .map(|bytes| String::from_utf8_lossy(&bytes).into_owned()) - .map_err(|e| format!("Failed to receive body: {:?}", e)) -} - -/// Accepts an entire HTTP body (as a string) and returns either the `result` field or the `error['message']` field, as a serde `Value`. -fn response_result_or_error(response: &str) -> Result { - let json = serde_json::from_str::(response) - .map_err(|e| RpcError::InvalidJson(e.to_string()))?; - - if let Some(error) = json.get("error").and_then(|e| e.get("message")) { - let error = error.to_string(); - if error.contains(EIP155_ERROR_STR) { - Err(RpcError::Eip155Error) - } else { - Err(RpcError::Error(error)) - } - } else { - json.get("result").cloned().ok_or(RpcError::NoResultField) - } -} - -/// Parses a `0x`-prefixed, **big-endian** hex string as a u64. -/// -/// Note: the JSON-RPC encodes integers as big-endian. The deposit contract uses little-endian. -/// Therefore, this function is only useful for numbers encoded by the JSON RPC. -/// -/// E.g., `0x01 == 1` -fn hex_to_u64_be(hex: &str) -> Result { - u64::from_str_radix(strip_prefix(hex)?, 16) - .map_err(|e| format!("Failed to parse hex as u64: {:?}", e)) -} - -/// Parses a `0x`-prefixed, big-endian hex string as bytes. -/// -/// E.g., `0x0102 == vec![1, 2]` -fn hex_to_bytes(hex: &str) -> Result, String> { - hex::decode(strip_prefix(hex)?).map_err(|e| format!("Failed to parse hex as bytes: {:?}", e)) -} - -/// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present. 
-fn strip_prefix(hex: &str) -> Result<&str, String> { - if let Some(stripped) = hex.strip_prefix("0x") { - Ok(stripped) - } else { - Err("Hex string did not start with `0x`".to_string()) - } -} diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index cf724201a4..f99d085250 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -3,17 +3,15 @@ extern crate lazy_static; mod block_cache; mod deposit_cache; -mod deposit_log; -pub mod http; mod inner; mod metrics; mod service; pub use block_cache::{BlockCache, Eth1Block}; pub use deposit_cache::DepositCache; -pub use deposit_log::DepositLog; +pub use execution_layer::http::deposit_log::DepositLog; pub use inner::SszEth1Cache; pub use service::{ - BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service, DEFAULT_CHAIN_ID, - DEFAULT_NETWORK_ID, + BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service, + DEFAULT_CHAIN_ID, }; diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 460f53e732..a4d4e5e254 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -2,29 +2,29 @@ use crate::metrics; use crate::{ block_cache::{BlockCache, Error as BlockCacheError, Eth1Block}, deposit_cache::{DepositCacheInsertOutcome, Error as DepositCacheError}, - http::{ - get_block, get_block_number, get_chain_id, get_deposit_logs_in_range, get_network_id, - BlockQuery, Eth1Id, - }, inner::{DepositUpdater, Inner}, }; +use execution_layer::auth::Auth; +use execution_layer::http::{ + deposit_methods::{BlockQuery, Eth1Id}, + HttpJsonRpc, +}; use fallback::{Fallback, FallbackError}; use futures::future::TryFutureExt; use parking_lot::{RwLock, RwLockReadGuard}; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, info, trace, warn, Logger}; +use slog::{debug, error, info, trace, warn, Logger}; use std::fmt::Debug; use std::future::Future; use 
std::ops::{Range, RangeInclusive}; +use std::path::PathBuf; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::sync::RwLock as TRwLock; use tokio::time::{interval_at, Duration, Instant}; use types::{ChainSpec, EthSpec, Unsigned}; -/// Indicates the default eth1 network id we use for the deposit contract. -pub const DEFAULT_NETWORK_ID: Eth1Id = Eth1Id::Goerli; /// Indicates the default eth1 chain id we use for the deposit contract. pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli; /// Indicates the default eth1 endpoint. @@ -39,10 +39,16 @@ const GET_BLOCK_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; /// Timeout when doing an eth_getLogs to read the deposit contract logs. const GET_DEPOSIT_LOG_TIMEOUT_MILLIS: u64 = 60_000; -const WARNING_MSG: &str = "BLOCK PROPOSALS WILL FAIL WITHOUT VALID, SYNCED ETH1 CONNECTION"; +/// Number of blocks to download if the node detects it is lagging behind due to an inaccurate +/// relationship between block-number-based follow distance and time-based follow distance. +const CATCHUP_BATCH_SIZE: u64 = 128; -/// A factor used to reduce the eth1 follow distance to account for discrepancies in the block time. -const ETH1_BLOCK_TIME_TOLERANCE_FACTOR: u64 = 4; +/// The absolute minimum follow distance to enforce when downloading catchup batches. +const CATCHUP_MIN_FOLLOW_DISTANCE: u64 = 64; + +/// To account for fast PoW blocks requiring more blocks in the cache than the block-based follow +/// distance would imply, we store `CACHE_FACTOR` more blocks in our cache. 
+const CACHE_FACTOR: u64 = 2; #[derive(Debug, PartialEq, Clone)] pub enum EndpointError { @@ -55,14 +61,14 @@ pub enum EndpointError { type EndpointState = Result<(), EndpointError>; pub struct EndpointWithState { - endpoint: SensitiveUrl, + client: HttpJsonRpc, state: TRwLock>, } impl EndpointWithState { - pub fn new(endpoint: SensitiveUrl) -> Self { + pub fn new(client: HttpJsonRpc) -> Self { Self { - endpoint, + client, state: TRwLock::new(None), } } @@ -81,7 +87,6 @@ async fn get_state(endpoint: &EndpointWithState) -> Option { /// is not usable. pub struct EndpointsCache { pub fallback: Fallback, - pub config_network_id: Eth1Id, pub config_chain_id: Eth1Id, pub log: Logger, } @@ -99,20 +104,14 @@ impl EndpointsCache { } crate::metrics::inc_counter_vec( &crate::metrics::ENDPOINT_REQUESTS, - &[&endpoint.endpoint.to_string()], + &[&endpoint.client.to_string()], ); - let state = endpoint_state( - &endpoint.endpoint, - &self.config_network_id, - &self.config_chain_id, - &self.log, - ) - .await; + let state = endpoint_state(&endpoint.client, &self.config_chain_id, &self.log).await; *value = Some(state.clone()); if state.is_err() { crate::metrics::inc_counter_vec( &crate::metrics::ENDPOINT_ERRORS, - &[&endpoint.endpoint.to_string()], + &[&endpoint.client.to_string()], ); crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); } else { @@ -128,7 +127,7 @@ impl EndpointsCache { func: F, ) -> Result<(O, usize), FallbackError> where - F: Fn(&'a SensitiveUrl) -> R, + F: Fn(&'a HttpJsonRpc) -> R, R: Future>, { let func = &func; @@ -136,12 +135,12 @@ impl EndpointsCache { .first_success(|endpoint| async move { match self.state(endpoint).await { Ok(()) => { - let endpoint_str = &endpoint.endpoint.to_string(); + let endpoint_str = &endpoint.client.to_string(); crate::metrics::inc_counter_vec( &crate::metrics::ENDPOINT_REQUESTS, &[endpoint_str], ); - match func(&endpoint.endpoint).await { + match func(&endpoint.client).await { Ok(t) => Ok(t), Err(t) => { 
crate::metrics::inc_counter_vec( @@ -178,8 +177,7 @@ impl EndpointsCache { /// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and /// chain id. Otherwise it returns `Err`. async fn endpoint_state( - endpoint: &SensitiveUrl, - config_network_id: &Eth1Id, + endpoint: &HttpJsonRpc, config_chain_id: &Eth1Id, log: &Logger, ) -> EndpointState { @@ -192,21 +190,9 @@ async fn endpoint_state( ); EndpointError::RequestFailed(e) }; - let network_id = get_network_id(endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) - .await - .map_err(error_connecting)?; - if &network_id != config_network_id { - warn!( - log, - "Invalid eth1 network id on endpoint. Please switch to correct network id"; - "endpoint" => %endpoint, - "action" => "trying fallbacks", - "expected" => format!("{:?}",config_network_id), - "received" => format!("{:?}",network_id), - ); - return Err(EndpointError::WrongNetworkId); - } - let chain_id = get_chain_id(endpoint, Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) + + let chain_id = endpoint + .get_chain_id(Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) .await .map_err(error_connecting)?; // Eth1 nodes return chain_id = 0 if the node is not synced @@ -214,7 +200,7 @@ async fn endpoint_state( if chain_id == Eth1Id::Custom(0) { warn!( log, - "Remote eth1 node is not synced"; + "Remote execution node is not synced"; "endpoint" => %endpoint, "action" => "trying fallbacks" ); @@ -223,11 +209,11 @@ async fn endpoint_state( if &chain_id != config_chain_id { warn!( log, - "Invalid eth1 chain id. Please switch to correct chain id on endpoint"; + "Invalid execution chain ID. 
Please switch to correct chain ID on endpoint"; "endpoint" => %endpoint, "action" => "trying fallbacks", - "expected" => format!("{:?}",config_chain_id), - "received" => format!("{:?}", chain_id), + "expected" => ?config_chain_id, + "received" => ?chain_id, ); Err(EndpointError::WrongChainId) } else { @@ -245,7 +231,7 @@ pub enum HeadType { /// Returns the head block and the new block ranges relevant for deposits and the block cache /// from the given endpoint. async fn get_remote_head_and_new_block_ranges( - endpoint: &SensitiveUrl, + endpoint: &HttpJsonRpc, service: &Service, node_far_behind_seconds: u64, ) -> Result< @@ -264,7 +250,7 @@ async fn get_remote_head_and_new_block_ranges( if remote_head_block.timestamp + node_far_behind_seconds < now { warn!( service.log, - "Eth1 endpoint is not synced"; + "Execution endpoint is not synced"; "endpoint" => %endpoint, "last_seen_block_unix_timestamp" => remote_head_block.timestamp, "action" => "trying fallback" @@ -276,7 +262,7 @@ async fn get_remote_head_and_new_block_ranges( if let SingleEndpointError::RemoteNotSynced { .. 
} = e { warn!( service.log, - "Eth1 endpoint is not synced"; + "Execution endpoint is not synced"; "endpoint" => %endpoint, "action" => "trying fallbacks" ); @@ -284,10 +270,18 @@ async fn get_remote_head_and_new_block_ranges( e }; let new_deposit_block_numbers = service - .relevant_new_block_numbers(remote_head_block.number, HeadType::Deposit) + .relevant_new_block_numbers( + remote_head_block.number, + Some(remote_head_block.timestamp), + HeadType::Deposit, + ) .map_err(handle_remote_not_synced)?; let new_block_cache_numbers = service - .relevant_new_block_numbers(remote_head_block.number, HeadType::BlockCache) + .relevant_new_block_numbers( + remote_head_block.number, + Some(remote_head_block.timestamp), + HeadType::BlockCache, + ) .map_err(handle_remote_not_synced)?; Ok(( remote_head_block, @@ -299,15 +293,15 @@ async fn get_remote_head_and_new_block_ranges( /// Returns the range of new block numbers to be considered for the given head type from the given /// endpoint. async fn relevant_new_block_numbers_from_endpoint( - endpoint: &SensitiveUrl, + endpoint: &HttpJsonRpc, service: &Service, head_type: HeadType, ) -> Result>, SingleEndpointError> { - let remote_highest_block = - get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) - .map_err(SingleEndpointError::GetBlockNumberFailed) - .await?; - service.relevant_new_block_numbers(remote_highest_block, head_type) + let remote_highest_block = endpoint + .get_block_number(Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) + .map_err(SingleEndpointError::GetBlockNumberFailed) + .await?; + service.relevant_new_block_numbers(remote_highest_block, None, head_type) } #[derive(Debug, PartialEq)] @@ -319,7 +313,7 @@ pub enum SingleEndpointError { RemoteNotSynced { next_required_block: u64, remote_highest_block: u64, - reduced_follow_distance: u64, + cache_follow_distance: u64, }, /// Failed to download a block from the eth1 node. 
BlockDownloadFailed(String), @@ -363,14 +357,41 @@ pub struct DepositCacheUpdateOutcome { pub logs_imported: usize, } +/// Supports either one authenticated jwt JSON-RPC endpoint **or** +/// multiple non-authenticated endpoints with fallback. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum Eth1Endpoint { + Auth { + endpoint: SensitiveUrl, + jwt_path: PathBuf, + jwt_id: Option, + jwt_version: Option, + }, + NoAuth(Vec), +} + +impl Eth1Endpoint { + fn len(&self) -> usize { + match &self { + Self::Auth { .. } => 1, + Self::NoAuth(urls) => urls.len(), + } + } + + pub fn get_endpoints(&self) -> Vec { + match &self { + Self::Auth { endpoint, .. } => vec![endpoint.clone()], + Self::NoAuth(endpoints) => endpoints.clone(), + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { /// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint. - pub endpoints: Vec, + pub endpoints: Eth1Endpoint, /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. pub deposit_contract_address: String, - /// The eth1 network id where the deposit contract is deployed (Goerli/Mainnet). - pub network_id: Eth1Id, /// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet). pub chain_id: Eth1Id, /// Defines the first block that the `DepositCache` will start searching for deposit logs. @@ -384,6 +405,11 @@ pub struct Config { /// /// Note: this should be less than or equal to the specification's `ETH1_FOLLOW_DISTANCE`. pub follow_distance: u64, + /// The follow distance to use for blocks in our cache. + /// + /// This can be set lower than the true follow distance in order to correct for poor timing + /// of eth1 blocks. + pub cache_follow_distance: Option, /// Specifies the seconds when we consider the head of a node far behind. /// This should be less than `ETH1_FOLLOW_DISTANCE * SECONDS_PER_ETH1_BLOCK`. 
pub node_far_behind_seconds: u64, @@ -410,34 +436,44 @@ impl Config { E::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot; let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block; - // Compute the number of extra blocks we store prior to the voting period start blocks. - let follow_distance_tolerance_blocks = - spec.eth1_follow_distance / ETH1_BLOCK_TIME_TOLERANCE_FACTOR; - // Ensure we can store two full windows of voting blocks. let voting_windows = eth1_blocks_per_voting_period * 2; - // Extend the cache to account for varying eth1 block times and the follow distance - // tolerance blocks. - let length = voting_windows - + (voting_windows / ETH1_BLOCK_TIME_TOLERANCE_FACTOR) - + follow_distance_tolerance_blocks; + // Extend the cache to account for the cache follow distance. + let extra_follow_distance_blocks = self + .follow_distance + .saturating_sub(self.cache_follow_distance()); - self.block_cache_truncation = Some(length as usize); + let length = voting_windows + extra_follow_distance_blocks; + + // Allow for more blocks to account for blocks being generated faster than expected. + // The cache expiry should really be timestamp based, but that would require a more + // extensive refactor. + let cache_size = CACHE_FACTOR * length; + + self.block_cache_truncation = Some(cache_size as usize); + } + + /// The distance at which the cache should follow the head. + /// + /// Defaults to 3/4 of `follow_distance` unless set manually. 
+ pub fn cache_follow_distance(&self) -> u64 { + self.cache_follow_distance + .unwrap_or(3 * self.follow_distance / 4) } } impl Default for Config { fn default() -> Self { Self { - endpoints: vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) - .expect("The default Eth1 endpoint must always be a valid URL.")], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) + .expect("The default Eth1 endpoint must always be a valid URL.")]), deposit_contract_address: "0x0000000000000000000000000000000000000000".into(), - network_id: DEFAULT_NETWORK_ID, chain_id: DEFAULT_CHAIN_ID, deposit_contract_deploy_block: 1, lowest_cached_block_number: 1, follow_distance: 128, + cache_follow_distance: None, node_far_behind_seconds: 128 * 14, block_cache_truncation: Some(4_096), auto_update_interval_millis: 60_000, @@ -486,9 +522,8 @@ impl Service { /// /// This is useful since the spec declares `SECONDS_PER_ETH1_BLOCK` to be `14`, whilst it is /// actually `15` on Goerli. - pub fn reduced_follow_distance(&self) -> u64 { - let full = self.config().follow_distance; - full.saturating_sub(full / ETH1_BLOCK_TIME_TOLERANCE_FACTOR) + pub fn cache_follow_distance(&self) -> u64 { + self.config().cache_follow_distance() } /// Return byte representation of deposit and block caches. @@ -642,27 +677,45 @@ impl Service { } /// Builds a new `EndpointsCache` with empty states. 
- pub fn init_endpoints(&self) -> Arc { + pub fn init_endpoints(&self) -> Result, String> { let endpoints = self.config().endpoints.clone(); - let config_network_id = self.config().network_id.clone(); let config_chain_id = self.config().chain_id.clone(); + + let servers = match endpoints { + Eth1Endpoint::Auth { + jwt_path, + endpoint, + jwt_id, + jwt_version, + } => { + let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) + .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; + vec![HttpJsonRpc::new_with_auth(endpoint, auth) + .map_err(|e| format!("Failed to build auth enabled json rpc {:?}", e))?] + } + Eth1Endpoint::NoAuth(urls) => urls + .into_iter() + .map(|url| { + HttpJsonRpc::new(url).map_err(|e| format!("Failed to build json rpc {:?}", e)) + }) + .collect::>()?, + }; let new_cache = Arc::new(EndpointsCache { - fallback: Fallback::new(endpoints.into_iter().map(EndpointWithState::new).collect()), - config_network_id, + fallback: Fallback::new(servers.into_iter().map(EndpointWithState::new).collect()), config_chain_id, log: self.log.clone(), }); let mut endpoints_cache = self.inner.endpoints_cache.write(); *endpoints_cache = Some(new_cache.clone()); - new_cache + Ok(new_cache) } /// Returns the cached `EndpointsCache` if it exists or builds a new one. - pub fn get_endpoints(&self) -> Arc { + pub fn get_endpoints(&self) -> Result, String> { let endpoints_cache = self.inner.endpoints_cache.read(); if let Some(cache) = endpoints_cache.clone() { - cache + Ok(cache) } else { drop(endpoints_cache); self.init_endpoints() @@ -680,7 +733,7 @@ impl Service { pub async fn update( &self, ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> { - let endpoints = self.get_endpoints(); + let endpoints = self.get_endpoints()?; // Reset the state of any endpoints which have errored so their state can be redetermined. 
endpoints.reset_errorred_endpoints().await; @@ -694,20 +747,16 @@ impl Service { .iter() .all(|error| matches!(error, SingleEndpointError::EndpointError(_))) { - crit!( + error!( self.log, - "Could not connect to a suitable eth1 node. Please ensure that you have \ - an eth1 http server running locally on http://localhost:8545 or specify \ - one or more (remote) endpoints using \ - `--eth1-endpoints `. \ - Also ensure that `eth` and `net` apis are enabled on the eth1 http \ - server"; - "warning" => WARNING_MSG + "No synced execution endpoint"; + "advice" => "ensure you have an execution node configured via \ + --execution-endpoint or if pre-merge, --eth1-endpoints" ); } } } - endpoints.fallback.map_format_error(|s| &s.endpoint, e) + endpoints.fallback.map_format_error(|s| &s.client, e) }; let process_err = |e: Error| match &e { @@ -723,12 +772,7 @@ impl Service { get_remote_head_and_new_block_ranges(e, self, node_far_behind_seconds).await }) .await - .map_err(|e| { - format!( - "Failed to update Eth1 service: {:?}", - process_single_err(&e) - ) - })?; + .map_err(|e| format!("{:?}", process_single_err(&e)))?; if num_errors > 0 { info!(self.log, "Fetched data from fallback"; "fallback_number" => num_errors); @@ -737,19 +781,38 @@ impl Service { *self.inner.remote_head_block.write() = Some(remote_head_block); let update_deposit_cache = async { - let outcome = self + let outcome_result = self .update_deposit_cache(Some(new_block_numbers_deposit), &endpoints) - .await - .map_err(|e| { - format!("Failed to update eth1 deposit cache: {:?}", process_err(e)) - })?; + .await; + + // Reset the `last_procesed block` to the last valid deposit's block number. + // This will ensure that the next batch of blocks fetched is immediately after + // the last cached valid deposit allowing us to recover from scenarios where + // the deposit cache gets corrupted due to invalid responses from eth1 nodes. 
+ if let Err(Error::FailedToInsertDeposit(DepositCacheError::NonConsecutive { + log_index: _, + expected: _, + })) = &outcome_result + { + let mut deposit_cache = self.inner.deposit_cache.write(); + debug!( + self.log, + "Resetting last processed block"; + "old_block_number" => deposit_cache.last_processed_block, + "new_block_number" => deposit_cache.cache.latest_block_number(), + ); + deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number(); + } + + let outcome = outcome_result + .map_err(|e| format!("Failed to update deposit cache: {:?}", process_err(e)))?; trace!( self.log, - "Updated eth1 deposit cache"; + "Updated deposit cache"; "cached_deposits" => self.inner.deposit_cache.read().cache.len(), "logs_imported" => outcome.logs_imported, - "last_processed_eth1_block" => self.inner.deposit_cache.read().last_processed_block, + "last_processed_execution_block" => self.inner.deposit_cache.read().last_processed_block, ); Ok::<_, String>(outcome) }; @@ -758,11 +821,16 @@ impl Service { let outcome = self .update_block_cache(Some(new_block_numbers_block_cache), &endpoints) .await - .map_err(|e| format!("Failed to update eth1 block cache: {:?}", process_err(e)))?; + .map_err(|e| { + format!( + "Failed to update deposit contract block cache: {:?}", + process_err(e) + ) + })?; trace!( self.log, - "Updated eth1 block cache"; + "Updated deposit contract block cache"; "cached_blocks" => self.inner.block_cache.read().len(), "blocks_imported" => outcome.blocks_imported, "head_block" => outcome.head_block_number, @@ -815,13 +883,13 @@ impl Service { match update_result { Err(e) => error!( self.log, - "Failed to update eth1 cache"; + "Error updating deposit contract cache"; "retry_millis" => update_interval.as_millis(), "error" => e, ), Ok((deposit, block)) => debug!( self.log, - "Updated eth1 cache"; + "Updated deposit contract cache"; "retry_millis" => update_interval.as_millis(), "blocks" => format!("{:?}", block), "deposits" => format!("{:?}", deposit), 
@@ -833,10 +901,12 @@ impl Service { /// Returns the range of new block numbers to be considered for the given head type. fn relevant_new_block_numbers( &self, - remote_highest_block: u64, + remote_highest_block_number: u64, + remote_highest_block_timestamp: Option, head_type: HeadType, ) -> Result>, SingleEndpointError> { - let follow_distance = self.reduced_follow_distance(); + let follow_distance = self.cache_follow_distance(); + let latest_cached_block = self.latest_cached_block(); let next_required_block = match head_type { HeadType::Deposit => self .deposits() @@ -844,16 +914,20 @@ impl Service { .last_processed_block .map(|n| n + 1) .unwrap_or_else(|| self.config().deposit_contract_deploy_block), - HeadType::BlockCache => self - .inner - .block_cache - .read() - .highest_block_number() - .map(|n| n + 1) + HeadType::BlockCache => latest_cached_block + .as_ref() + .map(|block| block.number + 1) .unwrap_or_else(|| self.config().lowest_cached_block_number), }; - relevant_block_range(remote_highest_block, next_required_block, follow_distance) + relevant_block_range( + remote_highest_block_number, + remote_highest_block_timestamp, + next_required_block, + follow_distance, + latest_cached_block.as_ref(), + &self.inner.spec, + ) } /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured @@ -928,15 +1002,15 @@ impl Service { */ let block_range_ref = &block_range; let logs = endpoints - .first_success(|e| async move { - get_deposit_logs_in_range( - e, - deposit_contract_address_ref, - block_range_ref.clone(), - Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), - ) - .await - .map_err(SingleEndpointError::GetDepositLogsFailed) + .first_success(|endpoint| async move { + endpoint + .get_deposit_logs_in_range( + deposit_contract_address_ref, + block_range_ref.clone(), + Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), + ) + .await + .map_err(SingleEndpointError::GetDepositLogsFailed) }) .await .map(|(res, _)| res) @@ -1099,7 
+1173,7 @@ impl Service { debug!( self.log, - "Downloading eth1 blocks"; + "Downloading execution blocks"; "first" => ?required_block_numbers.first(), "last" => ?required_block_numbers.last(), ); @@ -1162,16 +1236,16 @@ impl Service { if blocks_imported > 0 { debug!( self.log, - "Imported eth1 block(s)"; + "Imported execution block(s)"; "latest_block_age" => latest_block_mins, "latest_block" => block_cache.highest_block_number(), "total_cached_blocks" => block_cache.len(), - "new" => blocks_imported + "new" => %blocks_imported ); } else { debug!( self.log, - "No new eth1 blocks imported"; + "No new execution blocks imported"; "latest_block" => block_cache.highest_block_number(), "cached_blocks" => block_cache.len(), ); @@ -1189,24 +1263,51 @@ impl Service { /// Returns an error if `next_required_block > remote_highest_block + 1` which means the remote went /// backwards. fn relevant_block_range( - remote_highest_block: u64, + remote_highest_block_number: u64, + remote_highest_block_timestamp: Option, next_required_block: u64, - reduced_follow_distance: u64, + cache_follow_distance: u64, + latest_cached_block: Option<&Eth1Block>, + spec: &ChainSpec, ) -> Result>, SingleEndpointError> { - let remote_follow_block = remote_highest_block.saturating_sub(reduced_follow_distance); + // If the latest cached block is lagging the head block by more than `cache_follow_distance` + // times the expected block time then the eth1 block time is likely quite different from what we + // assumed. + // + // In order to catch up, load batches of `CATCHUP_BATCH_SIZE` until the situation rights itself. + // Note that we need to check this condition before the regular follow distance condition + // or we will keep downloading small numbers of blocks. 
+ if let (Some(remote_highest_block_timestamp), Some(latest_cached_block)) = + (remote_highest_block_timestamp, latest_cached_block) + { + let lagging = latest_cached_block.timestamp + + cache_follow_distance * spec.seconds_per_eth1_block + < remote_highest_block_timestamp; + let end_block = std::cmp::max( + std::cmp::min( + remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE), + next_required_block + CATCHUP_BATCH_SIZE, + ), + remote_highest_block_number.saturating_sub(cache_follow_distance), + ); + if lagging && next_required_block <= end_block { + return Ok(Some(next_required_block..=end_block)); + } + } + let remote_follow_block = remote_highest_block_number.saturating_sub(cache_follow_distance); if next_required_block <= remote_follow_block { Ok(Some(next_required_block..=remote_follow_block)) - } else if next_required_block > remote_highest_block + 1 { + } else if next_required_block > remote_highest_block_number + 1 { // If this is the case, the node must have gone "backwards" in terms of it's sync // (i.e., it's head block is lower than it was before). // - // We assume that the `reduced_follow_distance` should be sufficient to ensure this never + // We assume that the `cache_follow_distance` should be sufficient to ensure this never // happens, otherwise it is an error. Err(SingleEndpointError::RemoteNotSynced { next_required_block, - remote_highest_block, - reduced_follow_distance, + remote_highest_block: remote_highest_block_number, + cache_follow_distance, }) } else { // Return an empty range. @@ -1221,7 +1322,7 @@ fn relevant_block_range( /// /// Performs three async calls to an Eth1 HTTP JSON RPC endpoint. async fn download_eth1_block( - endpoint: &SensitiveUrl, + endpoint: &HttpJsonRpc, cache: Arc, block_number_opt: Option, ) -> Result { @@ -1242,15 +1343,15 @@ async fn download_eth1_block( }); // Performs a `get_blockByNumber` call to an eth1 node. 
- let http_block = get_block( - endpoint, - block_number_opt - .map(BlockQuery::Number) - .unwrap_or_else(|| BlockQuery::Latest), - Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), - ) - .map_err(SingleEndpointError::BlockDownloadFailed) - .await?; + let http_block = endpoint + .get_block( + block_number_opt + .map(BlockQuery::Number) + .unwrap_or_else(|| BlockQuery::Latest), + Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), + ) + .map_err(SingleEndpointError::BlockDownloadFailed) + .await?; Ok(Eth1Block { hash: http_block.hash, @@ -1275,8 +1376,8 @@ mod tests { #[test] fn serde_serialize() { let serialized = - toml::to_string(&Config::default()).expect("Should serde encode default config"); - toml::from_str::(&serialized).expect("Should serde decode default config"); + serde_yaml::to_string(&Config::default()).expect("Should serde encode default config"); + serde_yaml::from_str::(&serialized).expect("Should serde decode default config"); } #[test] @@ -1292,10 +1393,9 @@ mod tests { let seconds_per_voting_period = ::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot; let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block; - let reduce_follow_distance_blocks = - config.follow_distance / ETH1_BLOCK_TIME_TOLERANCE_FACTOR; + let cache_follow_distance_blocks = config.follow_distance - config.cache_follow_distance(); - let minimum_len = eth1_blocks_per_voting_period * 2 + reduce_follow_distance_blocks; + let minimum_len = eth1_blocks_per_voting_period * 2 + cache_follow_distance_blocks; assert!(len > minimum_len as usize); } diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index bb00ebaab1..f7f3b6e703 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -1,9 +1,9 @@ #![cfg(test)] use environment::{Environment, EnvironmentBuilder}; -use eth1::http::{get_deposit_count, get_deposit_logs_in_range, get_deposit_root, Block, Log}; -use eth1::{Config, Service}; -use 
eth1::{DepositCache, DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; +use eth1::{Config, Eth1Endpoint, Service}; +use eth1::{DepositCache, DEFAULT_CHAIN_ID}; use eth1_test_rig::GanacheEth1Instance; +use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; use merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; use slog::Logger; @@ -51,39 +51,39 @@ fn random_deposit_data() -> DepositData { } /// Blocking operation to get the deposit logs from the `deposit_contract`. -async fn blocking_deposit_logs(eth1: &GanacheEth1Instance, range: Range) -> Vec { - get_deposit_logs_in_range( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ð1.deposit_contract.address(), - range, - timeout(), - ) - .await - .expect("should get logs") +async fn blocking_deposit_logs( + client: &HttpJsonRpc, + eth1: &GanacheEth1Instance, + range: Range, +) -> Vec { + client + .get_deposit_logs_in_range(ð1.deposit_contract.address(), range, timeout()) + .await + .expect("should get logs") } /// Blocking operation to get the deposit root from the `deposit_contract`. -async fn blocking_deposit_root(eth1: &GanacheEth1Instance, block_number: u64) -> Option { - get_deposit_root( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ð1.deposit_contract.address(), - block_number, - timeout(), - ) - .await - .expect("should get deposit root") +async fn blocking_deposit_root( + client: &HttpJsonRpc, + eth1: &GanacheEth1Instance, + block_number: u64, +) -> Option { + client + .get_deposit_root(ð1.deposit_contract.address(), block_number, timeout()) + .await + .expect("should get deposit root") } /// Blocking operation to get the deposit count from the `deposit_contract`. 
-async fn blocking_deposit_count(eth1: &GanacheEth1Instance, block_number: u64) -> Option { - get_deposit_count( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ð1.deposit_contract.address(), - block_number, - timeout(), - ) - .await - .expect("should get deposit count") +async fn blocking_deposit_count( + client: &HttpJsonRpc, + eth1: &GanacheEth1Instance, + block_number: u64, +) -> Option { + client + .get_deposit_count(ð1.deposit_contract.address(), block_number, timeout()) + .await + .expect("should get deposit count") } async fn get_block_number(web3: &Web3) -> u64 { @@ -95,7 +95,7 @@ async fn get_block_number(web3: &Web3) -> u64 { } async fn new_ganache_instance() -> Result { - GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()).await + GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await } mod eth1_cache { @@ -107,7 +107,7 @@ mod eth1_cache { async { let log = null_logger(); - for follow_distance in 0..2 { + for follow_distance in 0..3 { let eth1 = new_ganache_instance() .await .expect("should start eth1 environment"); @@ -116,17 +116,19 @@ mod eth1_cache { let initial_block_number = get_block_number(&web3).await; - let service = Service::new( - Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); + let config = Config { + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: initial_block_number, + follow_distance, + ..Config::default() + }; + let cache_follow_distance = config.cache_follow_distance(); + + let service = Service::new(config, log.clone(), MainnetEthSpec::default_spec()); // Create some blocks and then consume them, performing the 
test `rounds` times. for round in 0..2 { @@ -139,7 +141,7 @@ mod eth1_cache { .blocks() .read() .highest_block_number() - .map(|n| n + follow_distance) + .map(|n| n + cache_follow_distance) .expect("should have a latest block after the first round") }; @@ -147,7 +149,7 @@ mod eth1_cache { eth1.ganache.evm_mine().await.expect("should mine block"); } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) @@ -168,12 +170,13 @@ mod eth1_cache { .blocks() .read() .highest_block_number() - .map(|n| n + follow_distance), + .map(|n| n + cache_follow_distance), Some(initial + blocks), - "should update {} blocks in round {} (follow {})", + "should update {} blocks in round {} (follow {} i.e. {})", blocks, round, follow_distance, + cache_follow_distance ); } } @@ -198,7 +201,10 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -215,7 +221,7 @@ mod eth1_cache { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) @@ -252,7 +258,10 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -267,7 +276,7 @@ mod eth1_cache { for _ in 0..cache_len / 2 { eth1.ganache.evm_mine().await.expect("should mine block") } - 
let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) .await @@ -302,7 +311,10 @@ mod eth1_cache { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: get_block_number(&web3).await, follow_distance: 0, @@ -316,7 +328,7 @@ mod eth1_cache { eth1.ganache.evm_mine().await.expect("should mine block") } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); futures::try_join!( service.update_deposit_cache(None, &endpoints), service.update_deposit_cache(None, &endpoints) @@ -354,7 +366,10 @@ mod deposit_tree { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: start_block, follow_distance: 0, @@ -374,7 +389,7 @@ mod deposit_tree { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) @@ -434,7 +449,10 @@ mod deposit_tree { let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: start_block, lowest_cached_block_number: start_block, @@ -454,7 +472,7 @@ mod deposit_tree { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints(); + let endpoints = 
service.init_endpoints().unwrap(); futures::try_join!( service.update_deposit_cache(None, &endpoints), service.update_deposit_cache(None, &endpoints) @@ -484,6 +502,8 @@ mod deposit_tree { let mut deposit_roots = vec![]; let mut deposit_counts = vec![]; + let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); + // Perform deposits to the smart contract, recording it's state along the way. for deposit in &deposits { deposit_contract @@ -492,12 +512,12 @@ mod deposit_tree { .expect("should perform a deposit"); let block_number = get_block_number(&web3).await; deposit_roots.push( - blocking_deposit_root(ð1, block_number) + blocking_deposit_root(&client, ð1, block_number) .await .expect("should get root if contract exists"), ); deposit_counts.push( - blocking_deposit_count(ð1, block_number) + blocking_deposit_count(&client, ð1, block_number) .await .expect("should get count if contract exists"), ); @@ -507,7 +527,7 @@ mod deposit_tree { // Pull all the deposit logs from the contract. let block_number = get_block_number(&web3).await; - let logs: Vec<_> = blocking_deposit_logs(ð1, 0..block_number) + let logs: Vec<_> = blocking_deposit_logs(&client, ð1, 0..block_number) .await .iter() .map(|raw| raw.to_deposit_log(spec).expect("should parse deposit log")) @@ -570,16 +590,12 @@ mod deposit_tree { /// Tests for the base HTTP requests and response handlers. 
mod http { use super::*; - use eth1::http::BlockQuery; - async fn get_block(eth1: &GanacheEth1Instance, block_number: u64) -> Block { - eth1::http::get_block( - &SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - BlockQuery::Number(block_number), - timeout(), - ) - .await - .expect("should get block number") + async fn get_block(client: &HttpJsonRpc, block_number: u64) -> Block { + client + .get_block(BlockQuery::Number(block_number), timeout()) + .await + .expect("should get block number") } #[tokio::test] @@ -590,17 +606,18 @@ mod http { .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; let web3 = eth1.web3(); + let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); let block_number = get_block_number(&web3).await; - let logs = blocking_deposit_logs(ð1, 0..block_number).await; + let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; assert_eq!(logs.len(), 0); - let mut old_root = blocking_deposit_root(ð1, block_number).await; - let mut old_block = get_block(ð1, block_number).await; + let mut old_root = blocking_deposit_root(&client, ð1, block_number).await; + let mut old_block = get_block(&client, block_number).await; let mut old_block_number = block_number; assert_eq!( - blocking_deposit_count(ð1, block_number).await, + blocking_deposit_count(&client, ð1, block_number).await, Some(0), "should have deposit count zero" ); @@ -618,18 +635,18 @@ mod http { // Check the logs. let block_number = get_block_number(&web3).await; - let logs = blocking_deposit_logs(ð1, 0..block_number).await; + let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; assert_eq!(logs.len(), i, "the number of logs should be as expected"); // Check the deposit count. assert_eq!( - blocking_deposit_count(ð1, block_number).await, + blocking_deposit_count(&client, ð1, block_number).await, Some(i as u64), "should have a correct deposit count" ); // Check the deposit root. 
- let new_root = blocking_deposit_root(ð1, block_number).await; + let new_root = blocking_deposit_root(&client, ð1, block_number).await; assert_ne!( new_root, old_root, "deposit root should change with each deposit" @@ -637,7 +654,7 @@ mod http { old_root = new_root; // Check the block hash. - let new_block = get_block(ð1, block_number).await; + let new_block = get_block(&client, block_number).await; assert_ne!( new_block.hash, old_block.hash, "block hash should change with each deposit" @@ -689,7 +706,10 @@ mod fast { let now = get_block_number(&web3).await; let service = Service::new( Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ -700,6 +720,7 @@ mod fast { log, MainnetEthSpec::default_spec(), ); + let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap(); let n = 10; let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); for deposit in &deposits { @@ -711,7 +732,7 @@ mod fast { eth1.ganache.evm_mine().await.expect("should mine block"); } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) .await @@ -723,8 +744,9 @@ mod fast { ); for block_num in 0..=get_block_number(&web3).await { - let expected_deposit_count = blocking_deposit_count(ð1, block_num).await; - let expected_deposit_root = blocking_deposit_root(ð1, block_num).await; + let expected_deposit_count = + blocking_deposit_count(&client, ð1, block_num).await; + let expected_deposit_root = blocking_deposit_root(&client, ð1, block_num).await; let deposit_count = service .deposits() @@ -765,7 +787,10 @@ mod persist { let now = get_block_number(&web3).await; let config = Config { - endpoints: 
vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, @@ -783,7 +808,7 @@ mod persist { .expect("should perform a deposit"); } - let endpoints = service.init_endpoints(); + let endpoints = service.init_endpoints().unwrap(); service .update_deposit_cache(None, &endpoints) .await @@ -874,10 +899,10 @@ mod fallbacks { let service = Service::new( Config { - endpoints: vec![ + endpoints: Eth1Endpoint::NoAuth(vec![ SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - ], + ]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance: 0, @@ -909,82 +934,13 @@ mod fallbacks { .await; } - #[tokio::test] - async fn test_fallback_when_wrong_network_id() { - async { - let log = null_logger(); - let correct_network_id: u64 = DEFAULT_NETWORK_ID.into(); - let wrong_network_id = correct_network_id + 1; - let endpoint1 = GanacheEth1Instance::new(wrong_network_id, DEFAULT_CHAIN_ID.into()) - .await - .expect("should start eth1 environment"); - let endpoint2 = new_ganache_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = &endpoint2.deposit_contract; - - let initial_block_number = get_block_number(&endpoint2.web3()).await; - - // Create some blocks and then consume them, performing the test `rounds` times. 
- let new_blocks = 4; - - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - endpoint2 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - //additional blocks for endpoint1 to be able to distinguish - for _ in 0..new_blocks { - endpoint1 - .ganache - .evm_mine() - .await - .expect("should mine block"); - } - - let service = Service::new( - Config { - endpoints: vec![ - SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - ], - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance: 0, - ..Config::default() - }, - log.clone(), - MainnetEthSpec::default_spec(), - ); - - let endpoint1_block_number = get_block_number(&endpoint1.web3()).await; - let endpoint2_block_number = get_block_number(&endpoint2.web3()).await; - assert!(endpoint2_block_number < endpoint1_block_number); - //the call will fallback to endpoint2 - service.update().await.expect("should update deposit cache"); - assert_eq!( - service.deposits().read().last_processed_block.unwrap(), - endpoint2_block_number - ); - } - .await; - } - #[tokio::test] async fn test_fallback_when_wrong_chain_id() { async { let log = null_logger(); let correct_chain_id: u64 = DEFAULT_CHAIN_ID.into(); let wrong_chain_id = correct_chain_id + 1; - let endpoint1 = GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), wrong_chain_id) + let endpoint1 = GanacheEth1Instance::new(wrong_chain_id) .await .expect("should start eth1 environment"); let endpoint2 = new_ganache_instance() @@ -1021,10 +977,10 @@ mod fallbacks { let service = Service::new( Config { - endpoints: vec![ + endpoints: Eth1Endpoint::NoAuth(vec![ SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), - ], + ]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: 
initial_block_number, follow_distance: 0, @@ -1076,10 +1032,10 @@ mod fallbacks { let service = Service::new( Config { - endpoints: vec![ + endpoints: Eth1Endpoint::NoAuth(vec![ SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(), SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(), - ], + ]), deposit_contract_address: deposit_contract.address(), lowest_cached_block_number: initial_block_number, follow_distance: 0, diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 0351b5e433..770bc4cf8c 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -16,14 +16,16 @@ reqwest = { version = "0.11.0", features = ["json","stream"] } eth2_serde_utils = "0.1.1" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } -eth1 = { path = "../eth1" } warp = { version = "0.3.2", features = ["tls"] } jsonwebtoken = "8" environment = { path = "../../lighthouse/environment" } bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" +eth2_ssz = "0.4.1" eth2_ssz_types = "0.2.2" +eth2 = { path = "../../common/eth2" } +state_processing = { path = "../../consensus/state_processing" } lru = "0.7.1" exit-future = "0.2.0" tree_hash = "0.4.1" @@ -36,3 +38,10 @@ zeroize = { version = "1.4.2", features = ["zeroize_derive"] } lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +builder_client = { path = "../builder_client" } +fork_choice = { path = "../../consensus/fork_choice" } +mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", rev = "a088806575805c00d63fa59c002abc5eb1dc7709"} +ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e1188b1" } +ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" } +tokio-stream = { version = "0.1.9", 
features = [ "sync" ] } +strum = "0.24.0" diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 64bc948c00..476fb251f2 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,12 +1,11 @@ use crate::engines::ForkChoiceState; -use async_trait::async_trait; -use eth1::http::RpcError; pub use ethers_core::types::Transaction; +use http::deposit_methods::RpcError; pub use json_structures::TransitionConfigurationV1; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; -use slog::Logger; use ssz_types::FixedVector; +use strum::IntoStaticStr; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, Hash256, Uint256, VariableList, @@ -29,10 +28,7 @@ pub enum Error { InvalidExecutePayloadResponse(&'static str), JsonRpc(RpcError), Json(serde_json::Error), - ServerMessage { - code: i64, - message: String, - }, + ServerMessage { code: i64, message: String }, Eip155Failure, IsSyncing, ExecutionBlockNotFound(ExecutionBlockHash), @@ -41,15 +37,9 @@ pub enum Error { PayloadIdUnavailable, TransitionConfigurationMismatch, PayloadConversionLogicFlaw, - InvalidBuilderQuery, - MissingPayloadId { - parent_hash: ExecutionBlockHash, - timestamp: u64, - prev_randao: Hash256, - suggested_fee_recipient: Address, - }, DeserializeTransaction(ssz_types::Error), DeserializeTransactions(ssz_types::Error), + BuilderApi(builder_client::Error), } impl From for Error { @@ -77,27 +67,20 @@ impl From for Error { } } -pub struct EngineApi; -pub struct BuilderApi; - -#[async_trait] -pub trait Builder { - async fn notify_forkchoice_updated( - &self, - forkchoice_state: ForkChoiceState, - payload_attributes: Option, - log: &Logger, - ) -> Result; +impl From for Error { + fn from(e: builder_client::Error) -> Self { + Error::BuilderApi(e) + } } -#[derive(Clone, Copy, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq, IntoStaticStr)] 
+#[strum(serialize_all = "snake_case")] pub enum PayloadStatusV1Status { Valid, Invalid, Syncing, Accepted, InvalidBlockHash, - InvalidTerminalBlock, } #[derive(Clone, Debug, PartialEq)] @@ -125,6 +108,8 @@ pub struct ExecutionBlock { pub block_number: u64, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, + #[serde(with = "eth2_serde_utils::u64_hex_be")] + pub timestamp: u64, } /// Representation of an exection block with enough detail to reconstruct a payload. diff --git a/beacon_node/execution_layer/src/engine_api/auth.rs b/beacon_node/execution_layer/src/engine_api/auth.rs index a4050a25c0..8fcdb2543d 100644 --- a/beacon_node/execution_layer/src/engine_api/auth.rs +++ b/beacon_node/execution_layer/src/engine_api/auth.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use jsonwebtoken::{encode, get_current_timestamp, Algorithm, EncodingKey, Header}; use rand::Rng; use serde::{Deserialize, Serialize}; @@ -13,6 +15,7 @@ pub const JWT_SECRET_LENGTH: usize = 32; pub enum Error { JWT(jsonwebtoken::errors::Error), InvalidToken, + InvalidKey(String), } impl From for Error { @@ -22,7 +25,7 @@ impl From for Error { } /// Provides wrapper around `[u8; JWT_SECRET_LENGTH]` that implements `Zeroize`. -#[derive(Zeroize)] +#[derive(Zeroize, Clone)] #[zeroize(drop)] pub struct JwtKey([u8; JWT_SECRET_LENGTH as usize]); @@ -57,6 +60,14 @@ impl JwtKey { } } +pub fn strip_prefix(s: &str) -> &str { + if let Some(stripped) = s.strip_prefix("0x") { + stripped + } else { + s + } +} + /// Contains the JWT secret and claims parameters. pub struct Auth { key: EncodingKey, @@ -73,6 +84,28 @@ impl Auth { } } + /// Create a new `Auth` struct given the path to the file containing the hex + /// encoded jwt key. 
+ pub fn new_with_path( + jwt_path: PathBuf, + id: Option, + clv: Option, + ) -> Result { + std::fs::read_to_string(&jwt_path) + .map_err(|e| { + Error::InvalidKey(format!( + "Failed to read JWT secret file {:?}, error: {:?}", + jwt_path, e + )) + }) + .and_then(|ref s| { + let secret_bytes = hex::decode(strip_prefix(s.trim_end())) + .map_err(|e| Error::InvalidKey(format!("Invalid hex string: {:?}", e)))?; + let secret = JwtKey::from_slice(&secret_bytes).map_err(Error::InvalidKey)?; + Ok(Self::new(secret, id, clv)) + }) + } + /// Generate a JWT token with `claims.iat` set to current time. pub fn generate_token(&self) -> Result { let claims = self.generate_claims_at_timestamp(); @@ -126,12 +159,12 @@ pub struct Claims { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::JWT_SECRET; + use crate::test_utils::DEFAULT_JWT_SECRET; #[test] fn test_roundtrip() { let auth = Auth::new( - JwtKey::from_slice(&JWT_SECRET).unwrap(), + JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), Some("42".into()), Some("Lighthouse".into()), ); @@ -139,7 +172,7 @@ mod tests { let token = auth.generate_token_with_claims(&claims).unwrap(); assert_eq!( - Auth::validate_token(&token, &JwtKey::from_slice(&JWT_SECRET).unwrap()) + Auth::validate_token(&token, &JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()) .unwrap() .claims, claims diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 179045ccf8..0f848a7716 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -3,15 +3,15 @@ use super::*; use crate::auth::Auth; use crate::json_structures::*; -use eth1::http::EIP155_ERROR_STR; use reqwest::header::CONTENT_TYPE; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde_json::json; -use std::marker::PhantomData; -use std::time::Duration; -use types::{BlindedPayload, EthSpec, ExecutionPayloadHeader, SignedBeaconBlock}; +use 
std::time::Duration; +use types::EthSpec; + +pub use deposit_log::{DepositLog, Log}; pub use reqwest::Client; const STATIC_ID: u32 = 1; @@ -26,42 +26,507 @@ pub const ETH_GET_BLOCK_BY_HASH: &str = "eth_getBlockByHash"; pub const ETH_GET_BLOCK_BY_HASH_TIMEOUT: Duration = Duration::from_secs(1); pub const ETH_SYNCING: &str = "eth_syncing"; -pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_millis(250); +pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; -pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(6); +pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; -pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(6); +pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str = "engine_exchangeTransitionConfigurationV1"; -pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = - Duration::from_millis(500); +pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1); -pub const BUILDER_GET_PAYLOAD_HEADER_V1: &str = "builder_getPayloadHeaderV1"; -pub const BUILDER_GET_PAYLOAD_HEADER_TIMEOUT: Duration = Duration::from_secs(2); +/// This error is returned during a `chainId` call by Geth. +pub const EIP155_ERROR_STR: &str = "chain not synced beyond EIP-155 replay-protection fork block"; -pub const BUILDER_PROPOSE_BLINDED_BLOCK_V1: &str = "builder_proposeBlindedBlockV1"; -pub const BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT: Duration = Duration::from_secs(2); +/// Contains methods to convert arbitary bytes to an ETH2 deposit contract object. 
+pub mod deposit_log { + use ssz::Decode; + use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; + use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes}; -pub struct HttpJsonRpc { + pub use eth2::lighthouse::DepositLog; + + /// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The + /// event bytes are formatted according to the Ethereum ABI. + const PUBKEY_START: usize = 192; + const PUBKEY_LEN: usize = 48; + const CREDS_START: usize = PUBKEY_START + 64 + 32; + const CREDS_LEN: usize = 32; + const AMOUNT_START: usize = CREDS_START + 32 + 32; + const AMOUNT_LEN: usize = 8; + const SIG_START: usize = AMOUNT_START + 32 + 32; + const SIG_LEN: usize = 96; + const INDEX_START: usize = SIG_START + 96 + 32; + const INDEX_LEN: usize = 8; + + /// A reduced set of fields from an Eth1 contract log. + #[derive(Debug, PartialEq, Clone)] + pub struct Log { + pub block_number: u64, + pub data: Vec, + } + + impl Log { + /// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`. 
+ pub fn to_deposit_log(&self, spec: &ChainSpec) -> Result { + let bytes = &self.data; + + let pubkey = bytes + .get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN) + .ok_or("Insufficient bytes for pubkey")?; + let withdrawal_credentials = bytes + .get(CREDS_START..CREDS_START + CREDS_LEN) + .ok_or("Insufficient bytes for withdrawal credential")?; + let amount = bytes + .get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN) + .ok_or("Insufficient bytes for amount")?; + let signature = bytes + .get(SIG_START..SIG_START + SIG_LEN) + .ok_or("Insufficient bytes for signature")?; + let index = bytes + .get(INDEX_START..INDEX_START + INDEX_LEN) + .ok_or("Insufficient bytes for index")?; + + let deposit_data = DepositData { + pubkey: PublicKeyBytes::from_ssz_bytes(pubkey) + .map_err(|e| format!("Invalid pubkey ssz: {:?}", e))?, + withdrawal_credentials: Hash256::from_ssz_bytes(withdrawal_credentials) + .map_err(|e| format!("Invalid withdrawal_credentials ssz: {:?}", e))?, + amount: u64::from_ssz_bytes(amount) + .map_err(|e| format!("Invalid amount ssz: {:?}", e))?, + signature: SignatureBytes::from_ssz_bytes(signature) + .map_err(|e| format!("Invalid signature ssz: {:?}", e))?, + }; + + let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec) + .map_or(false, |(public_key, signature, msg)| { + signature.verify(&public_key, msg) + }); + + Ok(DepositLog { + deposit_data, + block_number: self.block_number, + index: u64::from_ssz_bytes(index) + .map_err(|e| format!("Invalid index ssz: {:?}", e))?, + signature_is_valid, + }) + } + } + + #[cfg(test)] + pub mod tests { + use super::*; + use types::{EthSpec, MainnetEthSpec}; + + /// The data from a deposit event, using the v0.8.3 version of the deposit contract. 
+ pub const EXAMPLE_LOG: &[u8] = &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, + 3, 51, 6, 4, 158, 232, 82, 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, + 64, 213, 43, 52, 175, 154, 239, 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, + 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, 30, 63, 215, 238, 113, 60, + 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, 119, 88, 51, 80, 101, + 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, 187, 22, 95, 4, 211, + 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, 149, 250, 251, + 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, 18, 113, + 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + + #[test] + fn can_parse_example_log() { + let log = Log { + block_number: 42, + data: EXAMPLE_LOG.to_vec(), + }; + log.to_deposit_log(&MainnetEthSpec::default_spec()) + .expect("should decode log"); + } + } +} + +/// Contains subset of the HTTP JSON-RPC methods used to query an execution node for +/// state of the deposit contract. +pub mod deposit_methods { + use super::Log; + use crate::HttpJsonRpc; + use serde::{Deserialize, Serialize}; + use serde_json::{json, Value}; + use std::fmt; + use std::ops::Range; + use std::str::FromStr; + use std::time::Duration; + use types::Hash256; + + /// `keccak("DepositEvent(bytes,bytes,bytes,bytes,bytes)")` + pub const DEPOSIT_EVENT_TOPIC: &str = + "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"; + /// `keccak("get_deposit_root()")[0..4]` + pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0xc5f2892f"; + /// `keccak("get_deposit_count()")[0..4]` + pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130"; + + /// Number of bytes in deposit contract deposit root response. + pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96; + /// Number of bytes in deposit contract deposit root (value only). + pub const DEPOSIT_ROOT_BYTES: usize = 32; + + /// Represents an eth1 chain/network id. + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] + pub enum Eth1Id { + Goerli, + Mainnet, + Custom(u64), + } + + #[derive(Debug, PartialEq, Clone)] + pub struct Block { + pub hash: Hash256, + pub timestamp: u64, + pub number: u64, + } + + /// Used to identify a block when querying the Eth1 node. 
+ #[derive(Clone, Copy)] + pub enum BlockQuery { + Number(u64), + Latest, + } + + impl Into for Eth1Id { + fn into(self) -> u64 { + match self { + Eth1Id::Mainnet => 1, + Eth1Id::Goerli => 5, + Eth1Id::Custom(id) => id, + } + } + } + + impl From for Eth1Id { + fn from(id: u64) -> Self { + let into = |x: Eth1Id| -> u64 { x.into() }; + match id { + id if id == into(Eth1Id::Mainnet) => Eth1Id::Mainnet, + id if id == into(Eth1Id::Goerli) => Eth1Id::Goerli, + id => Eth1Id::Custom(id), + } + } + } + + impl FromStr for Eth1Id { + type Err = String; + + fn from_str(s: &str) -> Result { + s.parse::() + .map(Into::into) + .map_err(|e| format!("Failed to parse eth1 network id {}", e)) + } + } + + /// Represents an error received from a remote procecdure call. + #[derive(Debug, Serialize, Deserialize)] + pub enum RpcError { + NoResultField, + Eip155Error, + InvalidJson(String), + Error(String), + } + + impl fmt::Display for RpcError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RpcError::NoResultField => write!(f, "No result field in response"), + RpcError::Eip155Error => write!(f, "Not synced past EIP-155"), + RpcError::InvalidJson(e) => write!(f, "Malformed JSON received: {}", e), + RpcError::Error(s) => write!(f, "{}", s), + } + } + } + + impl From for String { + fn from(e: RpcError) -> String { + e.to_string() + } + } + + /// Parses a `0x`-prefixed, **big-endian** hex string as a u64. + /// + /// Note: the JSON-RPC encodes integers as big-endian. The deposit contract uses little-endian. + /// Therefore, this function is only useful for numbers encoded by the JSON RPC. + /// + /// E.g., `0x01 == 1` + fn hex_to_u64_be(hex: &str) -> Result { + u64::from_str_radix(strip_prefix(hex)?, 16) + .map_err(|e| format!("Failed to parse hex as u64: {:?}", e)) + } + + /// Parses a `0x`-prefixed, big-endian hex string as bytes. + /// + /// E.g., `0x0102 == vec![1, 2]` + fn hex_to_bytes(hex: &str) -> Result, String> { + hex::decode(strip_prefix(hex)?) 
+ .map_err(|e| format!("Failed to parse hex as bytes: {:?}", e)) + } + + /// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present. + fn strip_prefix(hex: &str) -> Result<&str, String> { + if let Some(stripped) = hex.strip_prefix("0x") { + Ok(stripped) + } else { + Err("Hex string did not start with `0x`".to_string()) + } + } + + impl HttpJsonRpc { + /// Get the eth1 chain id of the given endpoint. + pub async fn get_chain_id(&self, timeout: Duration) -> Result { + let chain_id: String = self + .rpc_request("eth_chainId", json!([]), timeout) + .await + .map_err(|e| format!("eth_chainId call failed {:?}", e))?; + hex_to_u64_be(chain_id.as_str()).map(|id| id.into()) + } + + /// Returns the current block number. + pub async fn get_block_number(&self, timeout: Duration) -> Result { + let response: String = self + .rpc_request("eth_blockNumber", json!([]), timeout) + .await + .map_err(|e| format!("eth_blockNumber call failed {:?}", e))?; + hex_to_u64_be(response.as_str()) + .map_err(|e| format!("Failed to get block number: {}", e)) + } + + /// Gets a block hash by block number. + pub async fn get_block( + &self, + query: BlockQuery, + timeout: Duration, + ) -> Result { + let query_param = match query { + BlockQuery::Number(block_number) => format!("0x{:x}", block_number), + BlockQuery::Latest => "latest".to_string(), + }; + let params = json!([ + query_param, + false // do not return full tx objects. + ]); + + let response: Value = self + .rpc_request("eth_getBlockByNumber", params, timeout) + .await + .map_err(|e| format!("eth_getBlockByNumber call failed {:?}", e))?; + + let hash: Vec = hex_to_bytes( + response + .get("hash") + .ok_or("No hash for block")? 
+ .as_str() + .ok_or("Block hash was not string")?, + )?; + let hash: Hash256 = if hash.len() == 32 { + Hash256::from_slice(&hash) + } else { + return Err(format!("Block hash was not 32 bytes: {:?}", hash)); + }; + + let timestamp = hex_to_u64_be( + response + .get("timestamp") + .ok_or("No timestamp for block")? + .as_str() + .ok_or("Block timestamp was not string")?, + )?; + + let number = hex_to_u64_be( + response + .get("number") + .ok_or("No number for block")? + .as_str() + .ok_or("Block number was not string")?, + )?; + + if number <= usize::max_value() as u64 { + Ok(Block { + hash, + timestamp, + number, + }) + } else { + Err(format!("Block number {} is larger than a usize", number)) + } + .map_err(|e| format!("Failed to get block number: {}", e)) + } + + /// Returns the value of the `get_deposit_count()` call at the given `address` for the given + /// `block_number`. + /// + /// Assumes that the `address` has the same ABI as the eth2 deposit contract. + pub async fn get_deposit_count( + &self, + address: &str, + block_number: u64, + timeout: Duration, + ) -> Result, String> { + let result = self + .call(address, DEPOSIT_COUNT_FN_SIGNATURE, block_number, timeout) + .await?; + match result { + None => Err("Deposit root response was none".to_string()), + Some(bytes) => { + if bytes.is_empty() { + Ok(None) + } else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES { + let mut array = [0; 8]; + array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]); + Ok(Some(u64::from_le_bytes(array))) + } else { + Err(format!( + "Deposit count response was not {} bytes: {:?}", + DEPOSIT_COUNT_RESPONSE_BYTES, bytes + )) + } + } + } + } + + /// Returns the value of the `get_hash_tree_root()` call at the given `block_number`. + /// + /// Assumes that the `address` has the same ABI as the eth2 deposit contract. 
+ pub async fn get_deposit_root( + &self, + address: &str, + block_number: u64, + timeout: Duration, + ) -> Result, String> { + let result = self + .call(address, DEPOSIT_ROOT_FN_SIGNATURE, block_number, timeout) + .await?; + match result { + None => Err("Deposit root response was none".to_string()), + Some(bytes) => { + if bytes.is_empty() { + Ok(None) + } else if bytes.len() == DEPOSIT_ROOT_BYTES { + Ok(Some(Hash256::from_slice(&bytes))) + } else { + Err(format!( + "Deposit root response was not {} bytes: {:?}", + DEPOSIT_ROOT_BYTES, bytes + )) + } + } + } + } + + /// Performs a instant, no-transaction call to the contract `address` with the given `0x`-prefixed + /// `hex_data`. + /// + /// Returns bytes, if any. + async fn call( + &self, + address: &str, + hex_data: &str, + block_number: u64, + timeout: Duration, + ) -> Result>, String> { + let params = json! ([ + { + "to": address, + "data": hex_data, + }, + format!("0x{:x}", block_number) + ]); + + let response: Option = self + .rpc_request("eth_call", params, timeout) + .await + .map_err(|e| format!("eth_call call failed {:?}", e))?; + + response.map(|s| hex_to_bytes(&s)).transpose() + } + + /// Returns logs for the `DEPOSIT_EVENT_TOPIC`, for the given `address` in the given + /// `block_height_range`. + /// + /// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not. + pub async fn get_deposit_logs_in_range( + &self, + address: &str, + block_height_range: Range, + timeout: Duration, + ) -> Result, String> { + let params = json! ([{ + "address": address, + "topics": [DEPOSIT_EVENT_TOPIC], + "fromBlock": format!("0x{:x}", block_height_range.start), + "toBlock": format!("0x{:x}", block_height_range.end), + }]); + + let response: Value = self + .rpc_request("eth_getLogs", params, timeout) + .await + .map_err(|e| format!("eth_getLogs call failed {:?}", e))?; + response + .as_array() + .cloned() + .ok_or("'result' value was not an array")? 
+ .into_iter() + .map(|value| { + let block_number = value + .get("blockNumber") + .ok_or("No block number field in log")? + .as_str() + .ok_or("Block number was not string")?; + + let data = value + .get("data") + .ok_or("No block number field in log")? + .as_str() + .ok_or("Data was not string")?; + + Ok(Log { + block_number: hex_to_u64_be(block_number)?, + data: hex_to_bytes(data)?, + }) + }) + .collect::, String>>() + .map_err(|e| format!("Failed to get logs in range: {}", e)) + } + } +} + +pub struct HttpJsonRpc { pub client: Client, pub url: SensitiveUrl, auth: Option, - _phantom: PhantomData, } -impl HttpJsonRpc { +impl HttpJsonRpc { pub fn new(url: SensitiveUrl) -> Result { Ok(Self { client: Client::builder().build()?, url, auth: None, - _phantom: PhantomData, }) } @@ -70,7 +535,6 @@ impl HttpJsonRpc { client: Client::builder().build()?, url, auth: Some(auth), - _phantom: PhantomData, }) } @@ -117,7 +581,13 @@ impl HttpJsonRpc { } } -impl HttpJsonRpc { +impl std::fmt::Display for HttpJsonRpc { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}, auth={}", self.url, self.auth.is_some()) + } +} + +impl HttpJsonRpc { pub async fn upcheck(&self) -> Result<(), Error> { let result: serde_json::Value = self .rpc_request(ETH_SYNCING, json!([]), ETH_SYNCING_TIMEOUT) @@ -233,67 +703,11 @@ impl HttpJsonRpc { } } -impl HttpJsonRpc { - pub async fn get_payload_header_v1( - &self, - payload_id: PayloadId, - ) -> Result, Error> { - let params = json!([JsonPayloadIdRequest::from(payload_id)]); - - let response: JsonExecutionPayloadHeaderV1 = self - .rpc_request( - BUILDER_GET_PAYLOAD_HEADER_V1, - params, - BUILDER_GET_PAYLOAD_HEADER_TIMEOUT, - ) - .await?; - - Ok(response.into()) - } - - pub async fn forkchoice_updated_v1( - &self, - forkchoice_state: ForkChoiceState, - payload_attributes: Option, - ) -> Result { - let params = json!([ - JsonForkChoiceStateV1::from(forkchoice_state), - payload_attributes.map(JsonPayloadAttributesV1::from) 
- ]); - - let response: JsonForkchoiceUpdatedV1Response = self - .rpc_request( - ENGINE_FORKCHOICE_UPDATED_V1, - params, - ENGINE_FORKCHOICE_UPDATED_TIMEOUT, - ) - .await?; - - Ok(response.into()) - } - - pub async fn propose_blinded_block_v1( - &self, - block: SignedBeaconBlock>, - ) -> Result, Error> { - let params = json!([block]); - - let response: JsonExecutionPayloadV1 = self - .rpc_request( - BUILDER_PROPOSE_BLINDED_BLOCK_V1, - params, - BUILDER_PROPOSE_BLINDED_BLOCK_TIMEOUT, - ) - .await?; - - Ok(response.into()) - } -} #[cfg(test)] mod test { use super::auth::JwtKey; use super::*; - use crate::test_utils::{MockServer, JWT_SECRET}; + use crate::test_utils::{MockServer, DEFAULT_JWT_SECRET}; use std::future::Future; use std::str::FromStr; use std::sync::Arc; @@ -313,8 +727,10 @@ mod test { let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); // Create rpc clients that include JWT auth headers if `with_auth` is true. let (rpc_client, echo_client) = if with_auth { - let rpc_auth = Auth::new(JwtKey::from_slice(&JWT_SECRET).unwrap(), None, None); - let echo_auth = Auth::new(JwtKey::from_slice(&JWT_SECRET).unwrap(), None, None); + let rpc_auth = + Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); + let echo_auth = + Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None); ( Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth).unwrap()), Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth).unwrap()), diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 3ebe82602f..204acf1a23 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -78,6 +78,7 @@ pub struct JsonExecutionPayloadHeaderV1 { pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, + #[serde(with = 
"eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, pub transactions_root: Hash256, @@ -142,6 +143,7 @@ pub struct JsonExecutionPayloadV1 { pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, + #[serde(with = "eth2_serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] @@ -320,7 +322,6 @@ pub enum JsonPayloadStatusV1Status { Syncing, Accepted, InvalidBlockHash, - InvalidTerminalBlock, } #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -339,9 +340,6 @@ impl From for JsonPayloadStatusV1Status { PayloadStatusV1Status::Syncing => JsonPayloadStatusV1Status::Syncing, PayloadStatusV1Status::Accepted => JsonPayloadStatusV1Status::Accepted, PayloadStatusV1Status::InvalidBlockHash => JsonPayloadStatusV1Status::InvalidBlockHash, - PayloadStatusV1Status::InvalidTerminalBlock => { - JsonPayloadStatusV1Status::InvalidTerminalBlock - } } } } @@ -353,9 +351,6 @@ impl From for PayloadStatusV1Status { JsonPayloadStatusV1Status::Syncing => PayloadStatusV1Status::Syncing, JsonPayloadStatusV1Status::Accepted => PayloadStatusV1Status::Accepted, JsonPayloadStatusV1Status::InvalidBlockHash => PayloadStatusV1Status::InvalidBlockHash, - JsonPayloadStatusV1Status::InvalidTerminalBlock => { - PayloadStatusV1Status::InvalidTerminalBlock - } } } } @@ -430,62 +425,10 @@ impl From for JsonForkchoiceUpdatedV1Response { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum JsonProposeBlindedBlockResponseStatus { - Valid, - Invalid, - Syncing, -} -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(bound = "E: EthSpec")] -pub struct JsonProposeBlindedBlockResponse { - pub result: ExecutionPayload, - pub error: Option, -} - -impl From> for ExecutionPayload { - fn from(j: 
JsonProposeBlindedBlockResponse) -> Self { - let JsonProposeBlindedBlockResponse { result, error: _ } = j; - result - } -} - -impl From for ProposeBlindedBlockResponseStatus { - fn from(j: JsonProposeBlindedBlockResponseStatus) -> Self { - match j { - JsonProposeBlindedBlockResponseStatus::Valid => { - ProposeBlindedBlockResponseStatus::Valid - } - JsonProposeBlindedBlockResponseStatus::Invalid => { - ProposeBlindedBlockResponseStatus::Invalid - } - JsonProposeBlindedBlockResponseStatus::Syncing => { - ProposeBlindedBlockResponseStatus::Syncing - } - } - } -} -impl From for JsonProposeBlindedBlockResponseStatus { - fn from(f: ProposeBlindedBlockResponseStatus) -> Self { - match f { - ProposeBlindedBlockResponseStatus::Valid => { - JsonProposeBlindedBlockResponseStatus::Valid - } - ProposeBlindedBlockResponseStatus::Invalid => { - JsonProposeBlindedBlockResponseStatus::Invalid - } - ProposeBlindedBlockResponseStatus::Syncing => { - JsonProposeBlindedBlockResponseStatus::Syncing - } - } - } -} - #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { + #[serde(with = "eth2_serde_utils::u256_hex_be")] pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, #[serde(with = "eth2_serde_utils::u64_hex_be")] diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 719db74c54..339006c1ba 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -1,16 +1,16 @@ //! Provides generic behaviour for multiple execution engines, specifically fallback behaviour. 
use crate::engine_api::{ - Builder, EngineApi, Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, - PayloadId, + Error as EngineApiError, ForkchoiceUpdatedResponse, PayloadAttributes, PayloadId, }; -use crate::{BuilderApi, HttpJsonRpc}; -use async_trait::async_trait; -use futures::future::join_all; +use crate::HttpJsonRpc; use lru::LruCache; -use slog::{crit, debug, info, warn, Logger}; +use slog::{debug, error, info, Logger}; use std::future::Future; -use tokio::sync::{Mutex, RwLock}; +use std::sync::Arc; +use task_executor::TaskExecutor; +use tokio::sync::{watch, Mutex, RwLock}; +use tokio_stream::wrappers::WatchStream; use types::{Address, ExecutionBlockHash, Hash256}; /// The number of payload IDs that will be stored for each `Engine`. @@ -19,14 +19,74 @@ use types::{Address, ExecutionBlockHash, Hash256}; const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; /// Stores the remembered state of a engine. -#[derive(Copy, Clone, PartialEq)] -enum EngineState { +#[derive(Copy, Clone, PartialEq, Debug, Eq, Default)] +enum EngineStateInternal { Synced, + #[default] Offline, Syncing, AuthFailed, } +/// A subset of the engine state to inform other services if the engine is online or offline. +#[derive(Debug, Clone, PartialEq, Eq, Copy)] +pub enum EngineState { + Online, + Offline, +} + +impl From for EngineState { + fn from(state: EngineStateInternal) -> Self { + match state { + EngineStateInternal::Synced | EngineStateInternal::Syncing => EngineState::Online, + EngineStateInternal::Offline | EngineStateInternal::AuthFailed => EngineState::Offline, + } + } +} + +/// Wrapper structure that ensures changes to the engine state are correctly reported to watchers. +struct State { + /// The actual engine state. + state: EngineStateInternal, + /// Notifier to watch the engine state. 
+ notifier: watch::Sender, +} + +impl std::ops::Deref for State { + type Target = EngineStateInternal; + + fn deref(&self) -> &Self::Target { + &self.state + } +} + +impl Default for State { + fn default() -> Self { + let state = EngineStateInternal::default(); + let (notifier, _receiver) = watch::channel(state.into()); + State { state, notifier } + } +} + +impl State { + // Updates the state and notifies all watchers if the state has changed. + pub fn update(&mut self, new_state: EngineStateInternal) { + self.state = new_state; + self.notifier.send_if_modified(|last_state| { + let changed = *last_state != new_state.into(); // notify conditionally + *last_state = new_state.into(); // update the state unconditionally + changed + }); + } + + /// Gives access to a channel containing whether the last state is online. + /// + /// This can be called several times. + pub fn watch(&self) -> WatchStream { + self.notifier.subscribe().into() + } +} + #[derive(Copy, Clone, PartialEq, Debug)] pub struct ForkChoiceState { pub head_block_hash: ExecutionBlockHash, @@ -34,22 +94,6 @@ pub struct ForkChoiceState { pub finalized_block_hash: ExecutionBlockHash, } -/// Used to enable/disable logging on some tasks. -#[derive(Copy, Clone, PartialEq)] -pub enum Logging { - Enabled, - Disabled, -} - -impl Logging { - pub fn is_enabled(&self) -> bool { - match self { - Logging::Enabled => true, - Logging::Disabled => false, - } - } -} - #[derive(Hash, PartialEq, std::cmp::Eq)] struct PayloadIdCacheKey { pub head_block_hash: ExecutionBlockHash, @@ -58,25 +102,44 @@ struct PayloadIdCacheKey { pub suggested_fee_recipient: Address, } -/// An execution engine. -pub struct Engine { - pub id: String, - pub api: HttpJsonRpc, - payload_id_cache: Mutex>, - state: RwLock, +#[derive(Debug)] +pub enum EngineError { + Offline, + Api { error: EngineApiError }, + BuilderApi { error: EngineApiError }, + Auth, } -impl Engine { +/// An execution engine. 
+pub struct Engine { + pub api: HttpJsonRpc, + payload_id_cache: Mutex>, + state: RwLock, + latest_forkchoice_state: RwLock>, + executor: TaskExecutor, + log: Logger, +} + +impl Engine { /// Creates a new, offline engine. - pub fn new(id: String, api: HttpJsonRpc) -> Self { + pub fn new(api: HttpJsonRpc, executor: TaskExecutor, log: &Logger) -> Self { Self { - id, api, payload_id_cache: Mutex::new(LruCache::new(PAYLOAD_ID_LRU_CACHE_SIZE)), - state: RwLock::new(EngineState::Offline), + state: Default::default(), + latest_forkchoice_state: Default::default(), + executor, + log: log.clone(), } } + /// Gives access to a channel containing the last engine state. + /// + /// This can be called several times. + pub async fn watch_state(&self) -> WatchStream { + self.state.read().await.watch() + } + pub async fn get_payload_id( &self, head_block_hash: ExecutionBlockHash, @@ -95,11 +158,8 @@ impl Engine { }) .cloned() } -} -#[async_trait] -impl Builder for Engine { - async fn notify_forkchoice_updated( + pub async fn notify_forkchoice_updated( &self, forkchoice_state: ForkChoiceState, payload_attributes: Option, @@ -126,57 +186,7 @@ impl Builder for Engine { Ok(response) } -} -#[async_trait] -impl Builder for Engine { - async fn notify_forkchoice_updated( - &self, - forkchoice_state: ForkChoiceState, - pa: Option, - log: &Logger, - ) -> Result { - let payload_attributes = pa.ok_or(EngineApiError::InvalidBuilderQuery)?; - let response = self - .api - .forkchoice_updated_v1(forkchoice_state, Some(payload_attributes)) - .await?; - - if let Some(payload_id) = response.payload_id { - let key = PayloadIdCacheKey::new(&forkchoice_state, &payload_attributes); - self.payload_id_cache.lock().await.put(key, payload_id); - } else { - warn!( - log, - "Builder should have returned a payload_id for attributes {:?}", payload_attributes - ); - } - - Ok(response) - } -} - -/// Holds multiple execution engines and provides functionality for managing them in a fallback -/// manner. 
-pub struct Engines { - pub engines: Vec>, - pub latest_forkchoice_state: RwLock>, - pub log: Logger, -} - -pub struct Builders { - pub builders: Vec>, - pub log: Logger, -} - -#[derive(Debug)] -pub enum EngineError { - Offline { id: String }, - Api { id: String, error: EngineApiError }, - Auth { id: String }, -} - -impl Engines { async fn get_latest_forkchoice_state(&self) -> Option { *self.latest_forkchoice_state.read().await } @@ -185,7 +195,7 @@ impl Engines { *self.latest_forkchoice_state.write().await = Some(state); } - async fn send_latest_forkchoice_state(&self, engine: &Engine) { + async fn send_latest_forkchoice_state(&self) { let latest_forkchoice_state = self.get_latest_forkchoice_state().await; if let Some(forkchoice_state) = latest_forkchoice_state { @@ -194,7 +204,6 @@ impl Engines { self.log, "No need to call forkchoiceUpdated"; "msg" => "head does not have execution enabled", - "id" => &engine.id, ); return; } @@ -203,323 +212,138 @@ impl Engines { self.log, "Issuing forkchoiceUpdated"; "forkchoice_state" => ?forkchoice_state, - "id" => &engine.id, ); // For simplicity, payload attributes are never included in this call. It may be // reasonable to include them in the future. - if let Err(e) = engine - .api - .forkchoice_updated_v1(forkchoice_state, None) - .await - { + if let Err(e) = self.api.forkchoice_updated_v1(forkchoice_state, None).await { debug!( self.log, "Failed to issue latest head to engine"; "error" => ?e, - "id" => &engine.id, ); } } else { debug!( self.log, "No head, not sending to engine"; - "id" => &engine.id, ); } } - /// Returns `true` if there is at least one engine with a "synced" status. - pub async fn any_synced(&self) -> bool { - for engine in &self.engines { - if *engine.state.read().await == EngineState::Synced { - return true; - } - } - false + /// Returns `true` if the engine has a "synced" status. 
+ pub async fn is_synced(&self) -> bool { + **self.state.read().await == EngineStateInternal::Synced } - /// Run the `EngineApi::upcheck` function on all nodes which are currently offline. - /// - /// This can be used to try and recover any offline nodes. - pub async fn upcheck_not_synced(&self, logging: Logging) { - let upcheck_futures = self.engines.iter().map(|engine| async move { - let mut state_lock = engine.state.write().await; - if *state_lock != EngineState::Synced { - match engine.api.upcheck().await { - Ok(()) => { - if logging.is_enabled() { - info!( - self.log, - "Execution engine online"; - "id" => &engine.id - ); - } + /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This + /// might be used to recover the node if offline. + pub async fn upcheck(&self) { + let state: EngineStateInternal = match self.api.upcheck().await { + Ok(()) => { + let mut state = self.state.write().await; + if **state != EngineStateInternal::Synced { + info!( + self.log, + "Execution engine online"; + ); - // Send the node our latest forkchoice_state. - self.send_latest_forkchoice_state(engine).await; - - *state_lock = EngineState::Synced - } - Err(EngineApiError::IsSyncing) => { - if logging.is_enabled() { - warn!( - self.log, - "Execution engine syncing"; - "id" => &engine.id - ) - } - - // Send the node our latest forkchoice_state, it may assist with syncing. 
- self.send_latest_forkchoice_state(engine).await; - - *state_lock = EngineState::Syncing - } - Err(EngineApiError::Auth(err)) => { - if logging.is_enabled() { - warn!( - self.log, - "Failed jwt authorization"; - "error" => ?err, - "id" => &engine.id - ); - } - - *state_lock = EngineState::AuthFailed - } - Err(e) => { - if logging.is_enabled() { - warn!( - self.log, - "Execution engine offline"; - "error" => ?e, - "id" => &engine.id - ) - } - } - } - } - *state_lock - }); - - let num_synced = join_all(upcheck_futures) - .await - .into_iter() - .filter(|state: &EngineState| *state == EngineState::Synced) - .count(); - - if num_synced == 0 && logging.is_enabled() { - crit!( - self.log, - "No synced execution engines"; - ) - } - } - - /// Run `func` on all engines, in the order in which they are defined, returning the first - /// successful result that is found. - /// - /// This function might try to run `func` twice. If all nodes return an error on the first time - /// it runs, it will try to upcheck all offline nodes and then run the function again. - pub async fn first_success<'a, F, G, H>(&'a self, func: F) -> Result> - where - F: Fn(&'a Engine) -> G + Copy, - G: Future>, - { - match self.first_success_without_retry(func).await { - Ok(result) => Ok(result), - Err(mut first_errors) => { - // Try to recover some nodes. - self.upcheck_not_synced(Logging::Enabled).await; - // Retry the call on all nodes. - match self.first_success_without_retry(func).await { - Ok(result) => Ok(result), - Err(second_errors) => { - first_errors.extend(second_errors); - Err(first_errors) - } - } - } - } - } - - /// Run `func` on all engines, in the order in which they are defined, returning the first - /// successful result that is found. 
- pub async fn first_success_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Result> - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let mut errors = vec![]; - - for engine in &self.engines { - let (engine_synced, engine_auth_failed) = { - let state = engine.state.read().await; - ( - *state == EngineState::Synced, - *state == EngineState::AuthFailed, - ) - }; - if engine_synced { - match func(engine).await { - Ok(result) => return Ok(result), - Err(error) => { - debug!( - self.log, - "Execution engine call failed"; - "error" => ?error, - "id" => &engine.id - ); - *engine.state.write().await = EngineState::Offline; - errors.push(EngineError::Api { - id: engine.id.clone(), - error, - }) - } - } - } else if engine_auth_failed { - errors.push(EngineError::Auth { - id: engine.id.clone(), - }) - } else { - errors.push(EngineError::Offline { - id: engine.id.clone(), - }) - } - } - - Err(errors) - } - - /// Runs `func` on all nodes concurrently, returning all results. Any nodes that are offline - /// will be ignored, however all synced or unsynced nodes will receive the broadcast. - /// - /// This function might try to run `func` twice. If all nodes return an error on the first time - /// it runs, it will try to upcheck all offline nodes and then run the function again. - pub async fn broadcast<'a, F, G, H>(&'a self, func: F) -> Vec> - where - F: Fn(&'a Engine) -> G + Copy, - G: Future>, - { - let first_results = self.broadcast_without_retry(func).await; - - let mut any_offline = false; - for result in &first_results { - match result { - Ok(_) => return first_results, - Err(EngineError::Offline { .. }) => any_offline = true, - _ => (), - } - } - - if any_offline { - self.upcheck_not_synced(Logging::Enabled).await; - self.broadcast_without_retry(func).await - } else { - first_results - } - } - - /// Runs `func` on all nodes concurrently, returning all results. 
- pub async fn broadcast_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Vec> - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let func = &func; - let futures = self.engines.iter().map(|engine| async move { - let is_offline = *engine.state.read().await == EngineState::Offline; - if !is_offline { - match func(engine).await { - Ok(res) => Ok(res), - Err(error) => { - debug!( - self.log, - "Execution engine call failed"; - "error" => ?error, - "id" => &engine.id - ); - *engine.state.write().await = EngineState::Offline; - Err(EngineError::Api { - id: engine.id.clone(), - error, - }) - } - } - } else { - Err(EngineError::Offline { - id: engine.id.clone(), - }) - } - }); - - join_all(futures).await - } -} - -impl Builders { - pub async fn first_success_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Result> - where - F: Fn(&'a Engine) -> G, - G: Future>, - { - let mut errors = vec![]; - - for builder in &self.builders { - match func(builder).await { - Ok(result) => return Ok(result), - Err(error) => { + // Send the node our latest forkchoice_state. 
+ self.send_latest_forkchoice_state().await; + } else { debug!( self.log, - "Builder call failed"; - "error" => ?error, - "id" => &builder.id + "Execution engine online"; ); - errors.push(EngineError::Api { - id: builder.id.clone(), - error, - }) } + state.update(EngineStateInternal::Synced); + **state } - } + Err(EngineApiError::IsSyncing) => { + let mut state = self.state.write().await; + state.update(EngineStateInternal::Syncing); + **state + } + Err(EngineApiError::Auth(err)) => { + error!( + self.log, + "Failed jwt authorization"; + "error" => ?err, + ); - Err(errors) + let mut state = self.state.write().await; + state.update(EngineStateInternal::AuthFailed); + **state + } + Err(e) => { + error!( + self.log, + "Error during execution engine upcheck"; + "error" => ?e, + ); + + let mut state = self.state.write().await; + state.update(EngineStateInternal::Offline); + **state + } + }; + + debug!( + self.log, + "Execution engine upcheck complete"; + "state" => ?state, + ); } - pub async fn broadcast_without_retry<'a, F, G, H>( - &'a self, - func: F, - ) -> Vec> + /// Run `func` on the node regardless of the node's current state. + /// + /// ## Note + /// + /// This function takes locks on `self.state`, holding a conflicting lock might cause a + /// deadlock. + pub async fn request<'a, F, G, H>(self: &'a Arc, func: F) -> Result where - F: Fn(&'a Engine) -> G, + F: Fn(&'a Engine) -> G, G: Future>, { - let func = &func; - let futures = self.builders.iter().map(|engine| async move { - func(engine).await.map_err(|error| { - debug!( - self.log, - "Builder call failed"; - "error" => ?error, - "id" => &engine.id - ); - EngineError::Api { - id: engine.id.clone(), - error, - } - }) - }); + match func(self).await { + Ok(result) => { + // Take a clone *without* holding the read-lock since the `upcheck` function will + // take a write-lock. + let state: EngineStateInternal = **self.state.read().await; - join_all(futures).await + // Keep an up to date engine state. 
+ if state != EngineStateInternal::Synced { + // Spawn the upcheck in another task to avoid slowing down this request. + let inner_self = self.clone(); + self.executor.spawn( + async move { inner_self.upcheck().await }, + "upcheck_after_success", + ); + } + + Ok(result) + } + Err(error) => { + error!( + self.log, + "Execution engine call failed"; + "error" => ?error, + ); + + // The node just returned an error, run an upcheck so we can update the endpoint + // state. + // + // Spawn the upcheck in another task to avoid slowing down this request. + let inner_self = self.clone(); + self.executor.spawn( + async move { inner_self.upcheck().await }, + "upcheck_after_error", + ); + + Err(EngineError::Api { error }) + } + } } } @@ -533,3 +357,22 @@ impl PayloadIdCacheKey { } } } + +#[cfg(test)] +mod tests { + use super::*; + use tokio_stream::StreamExt; + + #[tokio::test] + async fn test_state_notifier() { + let mut state = State::default(); + let initial_state: EngineState = state.state.into(); + assert_eq!(initial_state, EngineState::Offline); + state.update(EngineStateInternal::Synced); + + // a watcher that arrives after the first update. + let mut watcher = state.watch(); + let new_state = watcher.next().await.expect("Last state is always present"); + assert_eq!(new_state, EngineState::Online); + } +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 5aa4edd74a..89dc3f68e9 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,23 +4,23 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. 
-use crate::engine_api::Builder; -use crate::engines::Builders; -use auth::{Auth, JwtKey}; +use crate::payload_cache::PayloadCache; +use auth::{strip_prefix, Auth, JwtKey}; +use builder_client::BuilderHttpClient; use engine_api::Error as ApiError; pub use engine_api::*; -pub use engine_api::{http, http::HttpJsonRpc}; -pub use engines::ForkChoiceState; -use engines::{Engine, EngineError, Engines, Logging}; +pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; +use engines::{Engine, EngineError}; +pub use engines::{EngineState, ForkChoiceState}; +use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; -use payload_status::process_multiple_payload_statuses; +use payload_status::process_payload_status; pub use payload_status::PayloadStatus; use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; -use slog::{crit, debug, error, info, trace, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use std::collections::HashMap; -use std::convert::TryInto; use std::future::Future; use std::io::Write; use std::path::PathBuf; @@ -29,19 +29,24 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::{ sync::{Mutex, MutexGuard, RwLock}, - time::{sleep, sleep_until, Instant}, + time::sleep, }; +use tokio_stream::wrappers::WatchStream; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, - ProposerPreparationData, SignedBeaconBlock, Slot, + BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, + ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, }; mod engine_api; mod engines; mod metrics; +pub mod payload_cache; mod payload_status; pub mod test_utils; +/// Indicates the default jwt authenticated execution endpoint. +pub const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/"; + /// Name for the default file used for the jwt secret. 
pub const DEFAULT_JWT_FILE: &str = "jwt.hex"; @@ -63,14 +68,15 @@ const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60); #[derive(Debug)] pub enum Error { - NoEngines, + NoEngine, NoPayloadBuilder, ApiError(ApiError), - EngineErrors(Vec), + Builder(builder_client::Error), + NoHeaderFromBuilder, + EngineError(Box), NotSynced, ShuttingDown, FeeRecipientUnspecified, - ConsensusFailure, MissingLatestValidHash, InvalidJWTSecret(String), } @@ -99,15 +105,38 @@ pub struct Proposer { payload_attributes: PayloadAttributes, } -struct Inner { - engines: Engines, - builders: Builders, +/// Information from the beacon chain that is necessary for querying the builder API. +pub struct BuilderParams { + pub pubkey: PublicKeyBytes, + pub slot: Slot, + pub chain_health: ChainHealth, +} + +pub enum ChainHealth { + Healthy, + Unhealthy(FailedCondition), + Optimistic, + PreMerge, +} + +#[derive(Debug)] +pub enum FailedCondition { + Skips, + SkipsPerEpoch, + EpochsSinceFinalization, +} + +struct Inner { + engine: Arc, + builder: Option, execution_engine_forkchoice_lock: Mutex<()>, suggested_fee_recipient: Option
, proposer_preparation_data: Mutex>, execution_blocks: Mutex>, proposers: RwLock>, executor: TaskExecutor, + payload_cache: PayloadCache, + builder_profit_threshold: Uint256, log: Logger, } @@ -116,7 +145,7 @@ pub struct Config { /// Endpoint urls for EL nodes that are running the engine api. pub execution_endpoints: Vec, /// Endpoint urls for services providing the builder api. - pub builder_endpoints: Vec, + pub builder_url: Option, /// JWT secrets for the above endpoints running the engine api. pub secret_files: Vec, /// The default fee recipient to use on the beacon node if none if provided from @@ -128,128 +157,99 @@ pub struct Config { pub jwt_version: Option, /// Default directory for the jwt secret if not provided through cli. pub default_datadir: PathBuf, + /// The minimum value of an external payload for it to be considered in a proposal. + pub builder_profit_threshold: u128, } -fn strip_prefix(s: &str) -> &str { - if let Some(stripped) = s.strip_prefix("0x") { - stripped - } else { - s - } -} - -/// Provides access to one or more execution engines and provides a neat interface for consumption -/// by the `BeaconChain`. -/// -/// When there is more than one execution node specified, the others will be used in a "fallback" -/// fashion. Some requests may be broadcast to all nodes and others might only be sent to the first -/// node that returns a valid response. Ultimately, the purpose of fallback nodes is to provide -/// redundancy in the case where one node is offline. -/// -/// The fallback nodes have an ordering. The first supplied will be the first contacted, and so on. +/// Provides access to one execution engine and provides a neat interface for consumption by the +/// `BeaconChain`. #[derive(Clone)] -pub struct ExecutionLayer { - inner: Arc, +pub struct ExecutionLayer { + inner: Arc>, } -impl ExecutionLayer { - /// Instantiate `Self` with Execution engines specified using `Config`, all using the JSON-RPC via HTTP. 
+impl ExecutionLayer { + /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP. pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result { let Config { execution_endpoints: urls, - builder_endpoints: builder_urls, - mut secret_files, + builder_url, + secret_files, suggested_fee_recipient, jwt_id, jwt_version, default_datadir, + builder_profit_threshold, } = config; - if urls.is_empty() { - return Err(Error::NoEngines); + if urls.len() > 1 { + warn!(log, "Only the first execution engine url will be used"); } + let execution_url = urls.into_iter().next().ok_or(Error::NoEngine)?; - // Extend the jwt secret files with the default jwt secret path if not provided via cli. - // This ensures that we have a jwt secret for every EL. - secret_files.extend(vec![ - default_datadir.join(DEFAULT_JWT_FILE); - urls.len().saturating_sub(secret_files.len()) - ]); - - let secrets: Vec<(JwtKey, PathBuf)> = secret_files - .iter() - .map(|p| { - // Read secret from file if it already exists - if p.exists() { - std::fs::read_to_string(p) - .map_err(|e| { - format!("Failed to read JWT secret file {:?}, error: {:?}", p, e) - }) - .and_then(|ref s| { - let secret = JwtKey::from_slice( - &hex::decode(strip_prefix(s.trim_end())) - .map_err(|e| format!("Invalid hex string: {:?}", e))?, - )?; - Ok((secret, p.to_path_buf())) - }) - } else { - // Create a new file and write a randomly generated secret to it if file does not exist - std::fs::File::options() - .write(true) - .create_new(true) - .open(p) - .map_err(|e| { - format!("Failed to open JWT secret file {:?}, error: {:?}", p, e) - }) - .and_then(|mut f| { - let secret = auth::JwtKey::random(); - f.write_all(secret.hex_string().as_bytes()).map_err(|e| { - format!("Failed to write to JWT secret file: {:?}", e) - })?; - Ok((secret, p.to_path_buf())) - }) - } - }) - .collect::>() - .map_err(Error::InvalidJWTSecret)?; - - let engines: Vec> = urls + // Use the default jwt secret 
path if not provided via cli. + let secret_file = secret_files .into_iter() - .zip(secrets.into_iter()) - .map(|(url, (secret, path))| { - let id = url.to_string(); - let auth = Auth::new(secret, jwt_id.clone(), jwt_version.clone()); - debug!(log, "Loaded execution endpoint"; "endpoint" => %id, "jwt_path" => ?path); - let api = HttpJsonRpc::::new_with_auth(url, auth)?; - Ok(Engine::::new(id, api)) - }) - .collect::>()?; + .next() + .unwrap_or_else(|| default_datadir.join(DEFAULT_JWT_FILE)); - let builders: Vec> = builder_urls - .into_iter() + let jwt_key = if secret_file.exists() { + // Read secret from file if it already exists + std::fs::read_to_string(&secret_file) + .map_err(|e| format!("Failed to read JWT secret file. Error: {:?}", e)) + .and_then(|ref s| { + let secret = JwtKey::from_slice( + &hex::decode(strip_prefix(s.trim_end())) + .map_err(|e| format!("Invalid hex string: {:?}", e))?, + )?; + Ok(secret) + }) + .map_err(Error::InvalidJWTSecret) + } else { + // Create a new file and write a randomly generated secret to it if file does not exist + std::fs::File::options() + .write(true) + .create_new(true) + .open(&secret_file) + .map_err(|e| format!("Failed to open JWT secret file. 
Error: {:?}", e)) + .and_then(|mut f| { + let secret = auth::JwtKey::random(); + f.write_all(secret.hex_string().as_bytes()) + .map_err(|e| format!("Failed to write to JWT secret file: {:?}", e))?; + Ok(secret) + }) + .map_err(Error::InvalidJWTSecret) + }?; + + let engine: Engine = { + let auth = Auth::new(jwt_key, jwt_id, jwt_version); + debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path()); + let api = HttpJsonRpc::new_with_auth(execution_url, auth).map_err(Error::ApiError)?; + Engine::new(api, executor.clone(), &log) + }; + + let builder = builder_url .map(|url| { - let id = url.to_string(); - let api = HttpJsonRpc::::new(url)?; - Ok(Engine::::new(id, api)) + let builder_client = BuilderHttpClient::new(url.clone()).map_err(Error::Builder); + info!(log, + "Connected to external block builder"; + "builder_url" => ?url, + "builder_profit_threshold" => builder_profit_threshold); + builder_client }) - .collect::>()?; + .transpose()?; let inner = Inner { - engines: Engines { - engines, - latest_forkchoice_state: <_>::default(), - log: log.clone(), - }, - builders: Builders { - builders, - log: log.clone(), - }, + engine: Arc::new(engine), + builder, execution_engine_forkchoice_lock: <_>::default(), suggested_fee_recipient, proposer_preparation_data: Mutex::new(HashMap::new()), proposers: RwLock::new(HashMap::new()), execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, + payload_cache: PayloadCache::default(), + builder_profit_threshold: Uint256::from(builder_profit_threshold), log, }; @@ -259,19 +259,39 @@ impl ExecutionLayer { } } -impl ExecutionLayer { - fn engines(&self) -> &Engines { - &self.inner.engines +impl ExecutionLayer { + fn engine(&self) -> &Arc { + &self.inner.engine } - fn builders(&self) -> &Builders { - &self.inner.builders + pub fn builder(&self) -> &Option { + &self.inner.builder + } + + /// Cache a full payload, keyed on the `tree_hash_root` of its 
`transactions` field. + fn cache_payload(&self, payload: &ExecutionPayload) -> Option> { + self.inner.payload_cache.put(payload.clone()) + } + + /// Attempt to retrieve a full payload from the payload cache by the `transactions_root`. + pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { + self.inner.payload_cache.pop(root) } pub fn executor(&self) -> &TaskExecutor { &self.inner.executor } + /// Get the current difficulty of the PoW chain. + pub async fn get_current_difficulty(&self) -> Result { + let block = self + .engine() + .api + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) + .await? + .ok_or(ApiError::ExecutionHeadBlockNotFound)?; + Ok(block.total_difficulty) + } /// Note: this function returns a mutex guard, be careful to avoid deadlocks. async fn execution_blocks( &self, @@ -279,6 +299,13 @@ impl ExecutionLayer { self.inner.execution_blocks.lock().await } + /// Gives access to a channel containing if the last engine state is online or not. + /// + /// This can be called several times. + pub async fn get_responsiveness_watch(&self) -> WatchStream { + self.engine().watch_state().await + } + /// Note: this function returns a mutex guard, be careful to avoid deadlocks. async fn proposer_preparation_data( &self, @@ -298,88 +325,27 @@ impl ExecutionLayer { self.inner.execution_engine_forkchoice_lock.lock().await } - /// Convenience function to allow calling async functions in a non-async context. - pub fn block_on<'a, T, U, V>(&'a self, generate_future: T) -> Result - where - T: Fn(&'a Self) -> U, - U: Future>, - { - let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; - // TODO(merge): respect the shutdown signal. - runtime.block_on(generate_future(self)) - } - - /// Convenience function to allow calling async functions in a non-async context. - /// - /// The function is "generic" since it does not enforce a particular return type on - /// `generate_future`. 
- pub fn block_on_generic<'a, T, U, V>(&'a self, generate_future: T) -> Result - where - T: Fn(&'a Self) -> U, - U: Future, - { - let runtime = self.executor().handle().ok_or(Error::ShuttingDown)?; - // TODO(merge): respect the shutdown signal. - Ok(runtime.block_on(generate_future(self))) - } - /// Convenience function to allow spawning a task without waiting for the result. - pub fn spawn(&self, generate_future: T, name: &'static str) + pub fn spawn(&self, generate_future: F, name: &'static str) where - T: FnOnce(Self) -> U, + F: FnOnce(Self) -> U, U: Future + Send + 'static, { self.executor().spawn(generate_future(self.clone()), name); } - /// Spawns a routine which attempts to keep the execution engines online. + /// Spawns a routine which attempts to keep the execution engine online. pub fn spawn_watchdog_routine(&self, slot_clock: S) { - let watchdog = |el: ExecutionLayer| async move { + let watchdog = |el: ExecutionLayer| async move { // Run one task immediately. el.watchdog_task().await; - let recurring_task = - |el: ExecutionLayer, now: Instant, duration_to_next_slot: Duration| async move { - // We run the task three times per slot. - // - // The interval between each task is 1/3rd of the slot duration. This matches nicely - // with the attestation production times (unagg. at 1/3rd, agg at 2/3rd). - // - // Each task is offset by 3/4ths of the interval. - // - // On mainnet, this means we will run tasks at: - // - // - 3s after slot start: 1s before publishing unaggregated attestations. - // - 7s after slot start: 1s before publishing aggregated attestations. - // - 11s after slot start: 1s before the next slot starts. 
- let interval = duration_to_next_slot / 3; - let offset = (interval / 4) * 3; - - let first_execution = duration_to_next_slot + offset; - let second_execution = first_execution + interval; - let third_execution = second_execution + interval; - - sleep_until(now + first_execution).await; - el.engines().upcheck_not_synced(Logging::Disabled).await; - - sleep_until(now + second_execution).await; - el.engines().upcheck_not_synced(Logging::Disabled).await; - - sleep_until(now + third_execution).await; - el.engines().upcheck_not_synced(Logging::Disabled).await; - }; - // Start the loop to periodically update. loop { - if let Some(duration) = slot_clock.duration_to_next_slot() { - let now = Instant::now(); - - // Spawn a new task rather than waiting for this to finish. This ensure that a - // slow run doesn't prevent the next run from starting. - el.spawn(|el| recurring_task(el, now, duration), "exec_watchdog_task"); - } else { - error!(el.log(), "Failed to spawn watchdog task"); - } + el.spawn( + |el| async move { el.watchdog_task().await }, + "exec_watchdog_task", + ); sleep(slot_clock.slot_duration()).await; } }; @@ -389,16 +355,12 @@ impl ExecutionLayer { /// Performs a single execution of the watchdog routine. pub async fn watchdog_task(&self) { - // Disable logging since this runs frequently and may get annoying. - self.engines().upcheck_not_synced(Logging::Disabled).await; + self.engine().upcheck().await; } /// Spawns a routine which cleans the cached proposer data periodically. - pub fn spawn_clean_proposer_caches_routine( - &self, - slot_clock: S, - ) { - let preparation_cleaner = |el: ExecutionLayer| async move { + pub fn spawn_clean_proposer_caches_routine(&self, slot_clock: S) { + let preparation_cleaner = |el: ExecutionLayer| async move { // Start the loop to periodically clean proposer preparation cache. 
loop { if let Some(duration_to_next_epoch) = @@ -412,7 +374,7 @@ impl ExecutionLayer { .map(|slot| slot.epoch(T::slots_per_epoch())) { Some(current_epoch) => el - .clean_proposer_caches::(current_epoch) + .clean_proposer_caches(current_epoch) .await .map_err(|e| { error!( @@ -437,7 +399,7 @@ impl ExecutionLayer { /// Spawns a routine that polls the `exchange_transition_configuration` endpoint. pub fn spawn_transition_configuration_poll(&self, spec: ChainSpec) { - let routine = |el: ExecutionLayer| async move { + let routine = |el: ExecutionLayer| async move { loop { if let Err(e) = el.exchange_transition_configuration(&spec).await { error!( @@ -453,25 +415,36 @@ impl ExecutionLayer { self.spawn(routine, "exec_config_poll"); } - /// Returns `true` if there is at least one synced and reachable engine. + /// Returns `true` if the execution engine is synced and reachable. pub async fn is_synced(&self) -> bool { - self.engines().any_synced().await + self.engine().is_synced().await } - /// Updates the proposer preparation data provided by validators - pub fn update_proposer_preparation_blocking( - &self, - update_epoch: Epoch, - preparation_data: &[ProposerPreparationData], - ) -> Result<(), Error> { - self.block_on_generic(|_| async move { - self.update_proposer_preparation(update_epoch, preparation_data) + /// Execution nodes return a "SYNCED" response when they do not have any peers. + /// + /// This function is a wrapper over `Self::is_synced` that makes an additional + /// check for the execution layer sync status. Checks if the latest block has + /// a `block_number != 0`. + /// Returns the `Self::is_synced` response if unable to get latest block. 
+ pub async fn is_synced_for_notifier(&self) -> bool { + let synced = self.is_synced().await; + if synced { + if let Ok(Some(block)) = self + .engine() + .api + .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) .await - }) + { + if block.block_number == 0 { + return false; + } + } + } + synced } /// Updates the proposer preparation data provided by validators - async fn update_proposer_preparation( + pub async fn update_proposer_preparation( &self, update_epoch: Epoch, preparation_data: &[ProposerPreparationData], @@ -493,7 +466,7 @@ impl ExecutionLayer { } /// Removes expired entries from proposer_preparation_data and proposers caches - async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> { + async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> { let mut proposer_preparation_data = self.proposer_preparation_data().await; // Keep all entries that have been updated in the last 2 epochs @@ -561,141 +534,360 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. 
- pub async fn get_payload>( + #[allow(clippy::too_many_arguments)] + pub async fn get_payload>( &self, parent_hash: ExecutionBlockHash, timestamp: u64, prev_randao: Hash256, - finalized_block_hash: ExecutionBlockHash, proposer_index: u64, + forkchoice_update_params: ForkchoiceUpdateParameters, + builder_params: BuilderParams, + spec: &ChainSpec, ) -> Result { - let _timer = metrics::start_timer_vec( - &metrics::EXECUTION_LAYER_REQUEST_TIMES, - &[metrics::GET_PAYLOAD], - ); - let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; match Payload::block_type() { BlockType::Blinded => { - debug!( - self.log(), - "Issuing builder_getPayloadHeader"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, + let _timer = metrics::start_timer_vec( + &metrics::EXECUTION_LAYER_REQUEST_TIMES, + &[metrics::GET_BLINDED_PAYLOAD], ); - self.builders() - .first_success_without_retry(|engine| async move { - let payload_id = engine - .get_payload_id( - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - ) - .await - .ok_or(ApiError::MissingPayloadId { - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - })?; - engine - .api - .get_payload_header_v1::(payload_id) - .await? 
- .try_into() - .map_err(|_| ApiError::PayloadConversionLogicFlaw) - }) - .await - .map_err(Error::EngineErrors) + self.get_blinded_payload( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + builder_params, + spec, + ) + .await } BlockType::Full => { - debug!( - self.log(), - "Issuing engine_getPayload"; - "suggested_fee_recipient" => ?suggested_fee_recipient, - "prev_randao" => ?prev_randao, - "timestamp" => timestamp, - "parent_hash" => ?parent_hash, + let _timer = metrics::start_timer_vec( + &metrics::EXECUTION_LAYER_REQUEST_TIMES, + &[metrics::GET_PAYLOAD], ); - self.engines() - .first_success(|engine| async move { - let payload_id = if let Some(id) = engine - .get_payload_id( - parent_hash, - timestamp, - prev_randao, - suggested_fee_recipient, - ) - .await - { - // The payload id has been cached for this engine. - metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, - &[metrics::HIT], - ); - id - } else { - // The payload id has *not* been cached for this engine. Trigger an artificial - // fork choice update to retrieve a payload ID. - // - // TODO(merge): a better algorithm might try to favour a node that already had a - // cached payload id, since a payload that has had more time to produce is - // likely to be more profitable. 
- metrics::inc_counter_vec( - &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, - &[metrics::MISS], - ); - let fork_choice_state = ForkChoiceState { - head_block_hash: parent_hash, - safe_block_hash: parent_hash, - finalized_block_hash, - }; - let payload_attributes = PayloadAttributes { - timestamp, - prev_randao, - suggested_fee_recipient, - }; - - let response = engine - .notify_forkchoice_updated( - fork_choice_state, - Some(payload_attributes), - self.log(), - ) - .await?; - - match response.payload_id { - Some(payload_id) => payload_id, - None => { - error!( - self.log(), - "Exec engine unable to produce payload"; - "msg" => "No payload ID, the engine is likely syncing. \ - This has the potential to cause a missed block \ - proposal.", - "status" => ?response.payload_status - ); - return Err(ApiError::PayloadIdUnavailable); - } - } - }; - - engine - .api - .get_payload_v1::(payload_id) - .await - .map(Into::into) - }) - .await - .map_err(Error::EngineErrors) + self.get_full_payload( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + ) + .await } } } + #[allow(clippy::too_many_arguments)] + async fn get_blinded_payload>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, + builder_params: BuilderParams, + spec: &ChainSpec, + ) -> Result { + if let Some(builder) = self.builder() { + let slot = builder_params.slot; + let pubkey = builder_params.pubkey; + + match builder_params.chain_health { + ChainHealth::Healthy => { + info!( + self.log(), + "Requesting blinded header from connected builder"; + "slot" => ?slot, + "pubkey" => ?pubkey, + "parent_hash" => ?parent_hash, + ); + let (relay_result, local_result) = tokio::join!( + builder.get_builder_header::(slot, parent_hash, &pubkey), + self.get_full_payload_caching( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + 
forkchoice_update_params, + ) + ); + + return match (relay_result, local_result) { + (Err(e), Ok(local)) => { + warn!( + self.log(), + "Unable to retrieve a payload from a connected \ + builder, falling back to the local execution client: {e:?}" + ); + Ok(local) + } + (Ok(None), Ok(local)) => { + info!( + self.log(), + "No payload provided by connected builder. \ + Attempting to propose through local execution engine" + ); + Ok(local) + } + (Ok(Some(relay)), Ok(local)) => { + let is_signature_valid = relay.data.verify_signature(spec); + let header = relay.data.message.header; + + info!( + self.log(), + "Received a payload header from the connected builder"; + "block_hash" => ?header.block_hash(), + ); + + let relay_value = relay.data.message.value; + let configured_value = self.inner.builder_profit_threshold; + if relay_value < configured_value { + info!( + self.log(), + "The value offered by the connected builder does not meet \ + the configured profit threshold. Using local payload."; + "configured_value" => ?configured_value, "relay_value" => ?relay_value + ); + Ok(local) + } else if header.parent_hash() != parent_hash { + warn!( + self.log(), + "Invalid parent hash from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if header.prev_randao() != prev_randao { + warn!( + self.log(), + "Invalid prev randao from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if header.timestamp() != local.timestamp() { + warn!( + self.log(), + "Invalid timestamp from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if header.block_number() != local.block_number() { + warn!( + self.log(), + "Invalid block number from connected builder, \ + falling back to local execution engine." 
+ ); + Ok(local) + } else if !matches!(relay.version, Some(ForkName::Merge)) { + // Once fork information is added to the payload, we will need to + // check that the local and relay payloads match. At this point, if + // we are requesting a payload at all, we have to assume this is + // the Bellatrix fork. + warn!( + self.log(), + "Invalid fork from connected builder, falling \ + back to local execution engine." + ); + Ok(local) + } else if !is_signature_valid { + let pubkey_bytes = relay.data.message.pubkey; + warn!(self.log(), "Invalid signature for pubkey {pubkey_bytes} on \ + bid from connected builder, falling back to local execution engine."); + Ok(local) + } else { + if header.fee_recipient() != suggested_fee_recipient { + info!( + self.log(), + "Fee recipient from connected builder does \ + not match, using it anyways." + ); + } + Ok(header) + } + } + (relay_result, Err(local_error)) => { + warn!(self.log(), "Failure from local execution engine. Attempting to \ + propose through connected builder"; "error" => ?local_error); + relay_result + .map_err(Error::Builder)? + .ok_or(Error::NoHeaderFromBuilder) + .map(|d| d.data.message.header) + } + }; + } + ChainHealth::Unhealthy(condition) => { + info!(self.log(), "Due to poor chain health the local execution engine will be used \ + for payload construction. To adjust chain health conditions \ + Use `builder-fallback` prefixed flags"; + "failed_condition" => ?condition) + } + // Intentional no-op, so we never attempt builder API proposals pre-merge. + ChainHealth::PreMerge => (), + ChainHealth::Optimistic => info!(self.log(), "The local execution engine is syncing \ + so the builder network cannot safely be used. 
Attempting \ + to build a block with the local execution engine"), + } + } + self.get_full_payload_caching( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + ) + .await + } + + /// Get a full payload without caching its result in the execution layer's payload cache. + async fn get_full_payload>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, + ) -> Result { + self.get_full_payload_with( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + noop, + ) + .await + } + + /// Get a full payload and cache its result in the execution layer's payload cache. + async fn get_full_payload_caching>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, + ) -> Result { + self.get_full_payload_with( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + Self::cache_payload, + ) + .await + } + + async fn get_full_payload_with>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, + f: fn(&ExecutionLayer, &ExecutionPayload) -> Option>, + ) -> Result { + debug!( + self.log(), + "Issuing engine_getPayload"; + "suggested_fee_recipient" => ?suggested_fee_recipient, + "prev_randao" => ?prev_randao, + "timestamp" => timestamp, + "parent_hash" => ?parent_hash, + ); + self.engine() + .request(|engine| async move { + let payload_id = if let Some(id) = engine + .get_payload_id(parent_hash, timestamp, prev_randao, suggested_fee_recipient) + .await + { + // The payload id has been cached for this engine. 
+ metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, + &[metrics::HIT], + ); + id + } else { + // The payload id has *not* been cached. Trigger an artificial + // fork choice update to retrieve a payload ID. + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PRE_PREPARED_PAYLOAD_ID, + &[metrics::MISS], + ); + let fork_choice_state = ForkChoiceState { + head_block_hash: parent_hash, + safe_block_hash: forkchoice_update_params + .justified_hash + .unwrap_or_else(ExecutionBlockHash::zero), + finalized_block_hash: forkchoice_update_params + .finalized_hash + .unwrap_or_else(ExecutionBlockHash::zero), + }; + let payload_attributes = PayloadAttributes { + timestamp, + prev_randao, + suggested_fee_recipient, + }; + + let response = engine + .notify_forkchoice_updated( + fork_choice_state, + Some(payload_attributes), + self.log(), + ) + .await?; + + match response.payload_id { + Some(payload_id) => payload_id, + None => { + error!( + self.log(), + "Exec engine unable to produce payload"; + "msg" => "No payload ID, the engine is likely syncing. \ + This has the potential to cause a missed block proposal.", + "status" => ?response.payload_status + ); + return Err(ApiError::PayloadIdUnavailable); + } + } + }; + + engine + .api + .get_payload_v1::(payload_id) + .await + .map(|full_payload| { + if full_payload.fee_recipient != suggested_fee_recipient { + error!( + self.log(), + "Inconsistent fee recipient"; + "msg" => "The fee recipient returned from the Execution Engine differs \ + from the suggested_fee_recipient set on the beacon node. This could \ + indicate that fees are being diverted to another address. 
Please \ + ensure that the value of suggested_fee_recipient is set correctly and \ + that the Execution Engine is trusted.", + "fee_recipient" => ?full_payload.fee_recipient, + "suggested_fee_recipient" => ?suggested_fee_recipient, + ); + } + if f(self, &full_payload).is_some() { + warn!( + self.log(), + "Duplicate payload cached, this might indicate redundant proposal \ + attempts." + ); + } + full_payload.into() + }) + }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } + /// Maps to the `engine_newPayload` JSON-RPC call. /// /// ## Fallback Behaviour @@ -709,7 +901,7 @@ impl ExecutionLayer { /// - Invalid, if any nodes return invalid. /// - Syncing, if any nodes return syncing. /// - An error, if all nodes return an error. - pub async fn notify_new_payload( + pub async fn notify_new_payload( &self, execution_payload: &ExecutionPayload, ) -> Result { @@ -726,16 +918,21 @@ impl ExecutionLayer { "block_number" => execution_payload.block_number, ); - let broadcast_results = self - .engines() - .broadcast(|engine| engine.api.new_payload_v1(execution_payload.clone())) + let result = self + .engine() + .request(|engine| engine.api.new_payload_v1(execution_payload.clone())) .await; - process_multiple_payload_statuses( - execution_payload.block_hash, - broadcast_results.into_iter(), - self.log(), - ) + if let Ok(status) = &result { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PAYLOAD_STATUS, + &["new_payload", status.status.into()], + ); + } + + process_payload_status(execution_payload.block_hash, result, self.log()) + .map_err(Box::new) + .map_err(Error::EngineError) } /// Register that the given `validator_index` is going to produce a block at `slot`. 
@@ -812,6 +1009,7 @@ impl ExecutionLayer { pub async fn notify_forkchoice_updated( &self, head_block_hash: ExecutionBlockHash, + justified_block_hash: ExecutionBlockHash, finalized_block_hash: ExecutionBlockHash, current_slot: Slot, head_block_root: Hash256, @@ -825,6 +1023,7 @@ impl ExecutionLayer { self.log(), "Issuing engine_forkchoiceUpdated"; "finalized_block_hash" => ?finalized_block_hash, + "justified_block_hash" => ?justified_block_hash, "head_block_hash" => ?head_block_hash, ); @@ -851,47 +1050,39 @@ impl ExecutionLayer { } } - // see https://hackmd.io/@n0ble/kintsugi-spec#Engine-API - // for now, we must set safe_block_hash = head_block_hash let forkchoice_state = ForkChoiceState { head_block_hash, - safe_block_hash: head_block_hash, + safe_block_hash: justified_block_hash, finalized_block_hash, }; - self.engines() + self.engine() .set_latest_forkchoice_state(forkchoice_state) .await; - let broadcast_results = self - .engines() - .broadcast(|engine| async move { + let result = self + .engine() + .request(|engine| async move { engine .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) .await }) .await; - // Only query builders with payload attributes populated. 
- let builder_broadcast_results = if payload_attributes.is_some() { - self.builders() - .broadcast_without_retry(|engine| async move { - engine - .notify_forkchoice_updated(forkchoice_state, payload_attributes, self.log()) - .await - }) - .await - } else { - vec![] - }; - process_multiple_payload_statuses( + if let Ok(status) = &result { + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_PAYLOAD_STATUS, + &["forkchoice_updated", status.payload_status.status.into()], + ); + } + + process_payload_status( head_block_hash, - broadcast_results - .into_iter() - .chain(builder_broadcast_results.into_iter()) - .map(|result| result.map(|response| response.payload_status)), + result.map(|response| response.payload_status), self.log(), ) + .map_err(Box::new) + .map_err(Error::EngineError) } pub async fn exchange_transition_configuration(&self, spec: &ChainSpec) -> Result<(), Error> { @@ -901,55 +1092,43 @@ impl ExecutionLayer { terminal_block_number: 0, }; - let broadcast_results = self - .engines() - .broadcast(|engine| engine.api.exchange_transition_configuration_v1(local)) + let result = self + .engine() + .request(|engine| engine.api.exchange_transition_configuration_v1(local)) .await; - let mut errors = vec![]; - for (i, result) in broadcast_results.into_iter().enumerate() { - match result { - Ok(remote) => { - if local.terminal_total_difficulty != remote.terminal_total_difficulty - || local.terminal_block_hash != remote.terminal_block_hash - { - error!( - self.log(), - "Execution client config mismatch"; - "msg" => "ensure lighthouse and the execution client are up-to-date and \ - configured consistently", - "execution_endpoint" => i, - "remote" => ?remote, - "local" => ?local, - ); - errors.push(EngineError::Api { - id: i.to_string(), - error: ApiError::TransitionConfigurationMismatch, - }); - } else { - debug!( - self.log(), - "Execution client config is OK"; - "execution_endpoint" => i - ); - } - } - Err(e) => { + match result { + Ok(remote) => { + if 
local.terminal_total_difficulty != remote.terminal_total_difficulty + || local.terminal_block_hash != remote.terminal_block_hash + { error!( self.log(), - "Unable to get transition config"; - "error" => ?e, - "execution_endpoint" => i, + "Execution client config mismatch"; + "msg" => "ensure lighthouse and the execution client are up-to-date and \ + configured consistently", + "remote" => ?remote, + "local" => ?local, ); - errors.push(e); + Err(Error::EngineError(Box::new(EngineError::Api { + error: ApiError::TransitionConfigurationMismatch, + }))) + } else { + debug!( + self.log(), + "Execution client config is OK"; + ); + Ok(()) } } - } - - if errors.is_empty() { - Ok(()) - } else { - Err(Error::EngineErrors(errors)) + Err(e) => { + error!( + self.log(), + "Unable to get transition config"; + "error" => ?e, + ); + Err(Error::EngineError(Box::new(e))) + } } } @@ -963,6 +1142,7 @@ impl ExecutionLayer { pub async fn get_terminal_pow_block_hash( &self, spec: &ChainSpec, + timestamp: u64, ) -> Result, Error> { let _timer = metrics::start_timer_vec( &metrics::EXECUTION_LAYER_REQUEST_TIMES, @@ -970,8 +1150,8 @@ impl ExecutionLayer { ); let hash_opt = self - .engines() - .first_success(|engine| async move { + .engine() + .request(|engine| async move { let terminal_block_hash = spec.terminal_block_hash; if terminal_block_hash != ExecutionBlockHash::zero() { if self @@ -985,11 +1165,23 @@ impl ExecutionLayer { } } - self.get_pow_block_hash_at_total_difficulty(engine, spec) - .await + let block = self.get_pow_block_at_total_difficulty(engine, spec).await?; + if let Some(pow_block) = block { + // If `terminal_block.timestamp == transition_block.timestamp`, + // we violate the invariant that a block's timestamp must be + // strictly greater than its parent's timestamp. + // The execution layer will reject a fcu call with such payload + // attributes leading to a missed block. + // Hence, we return `None` in such a case. 
+ if pow_block.timestamp >= timestamp { + return Ok(None); + } + } + Ok(block.map(|b| b.block_hash)) }) .await - .map_err(Error::EngineErrors)?; + .map_err(Box::new) + .map_err(Error::EngineError)?; if let Some(hash) = &hash_opt { info!( @@ -1013,11 +1205,11 @@ impl ExecutionLayer { /// `get_pow_block_at_terminal_total_difficulty` /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md - async fn get_pow_block_hash_at_total_difficulty( + async fn get_pow_block_at_total_difficulty( &self, - engine: &Engine, + engine: &Engine, spec: &ChainSpec, - ) -> Result, ApiError> { + ) -> Result, ApiError> { let mut block = engine .api .get_block_by_number(BlockByNumberQuery::Tag(LATEST_TAG)) @@ -1030,7 +1222,7 @@ impl ExecutionLayer { let block_reached_ttd = block.total_difficulty >= spec.terminal_total_difficulty; if block_reached_ttd { if block.parent_hash == ExecutionBlockHash::zero() { - return Ok(Some(block.block_hash)); + return Ok(Some(block)); } let parent = self .get_pow_block(engine, block.parent_hash) @@ -1039,7 +1231,7 @@ impl ExecutionLayer { let parent_reached_ttd = parent.total_difficulty >= spec.terminal_total_difficulty; if block_reached_ttd && !parent_reached_ttd { - return Ok(Some(block.block_hash)); + return Ok(Some(block)); } else { block = parent; } @@ -1056,8 +1248,8 @@ impl ExecutionLayer { /// - `Some(true)` if the given `block_hash` is the terminal proof-of-work block. /// - `Some(false)` if the given `block_hash` is certainly *not* the terminal proof-of-work /// block. - /// - `None` if the `block_hash` or its parent were not present on the execution engines. - /// - `Err(_)` if there was an error connecting to the execution engines. + /// - `None` if the `block_hash` or its parent were not present on the execution engine. + /// - `Err(_)` if there was an error connecting to the execution engine. 
/// /// ## Fallback Behaviour /// @@ -1085,9 +1277,8 @@ impl ExecutionLayer { &[metrics::IS_VALID_TERMINAL_POW_BLOCK_HASH], ); - let broadcast_results = self - .engines() - .broadcast(|engine| async move { + self.engine() + .request(|engine| async move { if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? { if let Some(pow_parent) = self.get_pow_block(engine, pow_block.parent_hash).await? @@ -1099,38 +1290,9 @@ impl ExecutionLayer { } Ok(None) }) - .await; - - let mut errors = vec![]; - let mut terminal = 0; - let mut not_terminal = 0; - let mut block_missing = 0; - for result in broadcast_results { - match result { - Ok(Some(true)) => terminal += 1, - Ok(Some(false)) => not_terminal += 1, - Ok(None) => block_missing += 1, - Err(e) => errors.push(e), - } - } - - if terminal > 0 && not_terminal > 0 { - crit!( - self.log(), - "Consensus failure between execution nodes"; - "method" => "is_valid_terminal_pow_block_hash" - ); - } - - if terminal > 0 { - Ok(Some(true)) - } else if not_terminal > 0 { - Ok(Some(false)) - } else if block_missing > 0 { - Ok(None) - } else { - Err(Error::EngineErrors(errors)) - } + .await + .map_err(Box::new) + .map_err(Error::EngineError) } /// This function should remain internal. @@ -1149,17 +1311,9 @@ impl ExecutionLayer { } /// Maps to the `eth_getBlockByHash` JSON-RPC call. - /// - /// ## TODO(merge) - /// - /// This will return an execution block regardless of whether or not it was created by a PoW - /// miner (pre-merge) or a PoS validator (post-merge). 
It's not immediately clear if this is - /// correct or not, see the discussion here: - /// - /// https://github.com/ethereum/consensus-specs/issues/2636 async fn get_pow_block( &self, - engine: &Engine, + engine: &Engine, hash: ExecutionBlockHash, ) -> Result, ApiError> { if let Some(cached) = self.execution_blocks().await.get(&hash).copied() { @@ -1178,22 +1332,23 @@ impl ExecutionLayer { } } - pub async fn get_payload_by_block_hash( + pub async fn get_payload_by_block_hash( &self, hash: ExecutionBlockHash, ) -> Result>, Error> { - self.engines() - .first_success(|engine| async move { + self.engine() + .request(|engine| async move { self.get_payload_by_block_hash_from_engine(engine, hash) .await }) .await - .map_err(Error::EngineErrors) + .map_err(Box::new) + .map_err(Error::EngineError) } - async fn get_payload_by_block_hash_from_engine( + async fn get_payload_by_block_hash_from_engine( &self, - engine: &Engine, + engine: &Engine, hash: ExecutionBlockHash, ) -> Result>, ApiError> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_GET_PAYLOAD_BY_BLOCK_HASH); @@ -1236,21 +1391,24 @@ impl ExecutionLayer { })) } - pub async fn propose_blinded_beacon_block( + pub async fn propose_blinded_beacon_block( &self, block: &SignedBeaconBlock>, ) -> Result, Error> { debug!( self.log(), - "Issuing builder_proposeBlindedBlock"; + "Sending block to builder"; "root" => ?block.canonical_root(), ); - self.builders() - .first_success_without_retry(|engine| async move { - engine.api.propose_blinded_block_v1(block.clone()).await - }) - .await - .map_err(Error::EngineErrors) + if let Some(builder) = self.builder() { + builder + .post_builder_blinded_blocks(block) + .await + .map_err(Error::Builder) + .map(|d| d.data) + } else { + Err(Error::NoPayloadBuilder) + } } } @@ -1282,27 +1440,62 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_block_prior_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - 
el.engines().upcheck_not_synced(Logging::Disabled).await; - assert_eq!(el.get_terminal_pow_block_hash(&spec).await.unwrap(), None) + el.engine().upcheck().await; + assert_eq!( + el.get_terminal_pow_block_hash(&spec, timestamp_now()) + .await + .unwrap(), + None + ) }) .await .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { assert_eq!( - el.get_terminal_pow_block_hash(&spec).await.unwrap(), + el.get_terminal_pow_block_hash(&spec, timestamp_now()) + .await + .unwrap(), Some(terminal_block.unwrap().block_hash) ) }) .await; } + #[tokio::test] + async fn rejects_terminal_block_with_equal_timestamp() { + let runtime = TestRuntime::default(); + MockExecutionLayer::default_params(runtime.task_executor.clone()) + .move_to_block_prior_to_terminal_block() + .with_terminal_block(|spec, el, _| async move { + el.engine().upcheck().await; + assert_eq!( + el.get_terminal_pow_block_hash(&spec, timestamp_now()) + .await + .unwrap(), + None + ) + }) + .await + .move_to_terminal_block() + .with_terminal_block(|spec, el, terminal_block| async move { + let timestamp = terminal_block.as_ref().map(|b| b.timestamp).unwrap(); + assert_eq!( + el.get_terminal_pow_block_hash(&spec, timestamp) + .await + .unwrap(), + None + ) + }) + .await; + } + #[tokio::test] async fn verifies_valid_terminal_block_hash() { let runtime = TestRuntime::default(); MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; assert_eq!( el.is_valid_terminal_pow_block_hash(terminal_block.unwrap().block_hash, &spec) .await @@ -1319,7 +1512,7 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, terminal_block| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; let 
invalid_terminal_block = terminal_block.unwrap().parent_hash; assert_eq!( @@ -1338,7 +1531,7 @@ mod test { MockExecutionLayer::default_params(runtime.task_executor.clone()) .move_to_terminal_block() .with_terminal_block(|spec, el, _| async move { - el.engines().upcheck_not_synced(Logging::Disabled).await; + el.engine().upcheck().await; let missing_terminal_block = ExecutionBlockHash::repeat_byte(42); assert_eq!( @@ -1351,3 +1544,16 @@ mod test { .await; } } + +fn noop(_: &ExecutionLayer, _: &ExecutionPayload) -> Option> { + None +} + +#[cfg(test)] +/// Returns the duration since the unix epoch. +fn timestamp_now() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) + .as_secs() +} diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 356c5a46dd..9b00193a4a 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -3,6 +3,7 @@ pub use lighthouse_metrics::*; pub const HIT: &str = "hit"; pub const MISS: &str = "miss"; pub const GET_PAYLOAD: &str = "get_payload"; +pub const GET_BLINDED_PAYLOAD: &str = "get_blinded_payload"; pub const NEW_PAYLOAD: &str = "new_payload"; pub const FORKCHOICE_UPDATED: &str = "forkchoice_updated"; pub const GET_TERMINAL_POW_BLOCK_HASH: &str = "get_terminal_pow_block_hash"; @@ -35,4 +36,9 @@ lazy_static::lazy_static! 
{ "execution_layer_get_payload_by_block_hash_time", "Time to reconstruct a payload from the EE using eth_getBlockByHash" ); + pub static ref EXECUTION_LAYER_PAYLOAD_STATUS: Result = try_create_int_counter_vec( + "execution_layer_payload_status", + "Indicates the payload status returned for a particular method", + &["method", "status"] + ); } diff --git a/beacon_node/execution_layer/src/payload_cache.rs b/beacon_node/execution_layer/src/payload_cache.rs new file mode 100644 index 0000000000..60a8f2a95c --- /dev/null +++ b/beacon_node/execution_layer/src/payload_cache.rs @@ -0,0 +1,33 @@ +use lru::LruCache; +use parking_lot::Mutex; +use tree_hash::TreeHash; +use types::{EthSpec, ExecutionPayload, Hash256}; + +pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10; + +/// A cache mapping execution payloads by tree hash roots. +pub struct PayloadCache { + payloads: Mutex>>, +} + +#[derive(Hash, PartialEq, Eq)] +struct PayloadCacheId(Hash256); + +impl Default for PayloadCache { + fn default() -> Self { + PayloadCache { + payloads: Mutex::new(LruCache::new(DEFAULT_PAYLOAD_CACHE_SIZE)), + } + } +} + +impl PayloadCache { + pub fn put(&self, payload: ExecutionPayload) -> Option> { + let root = payload.tree_hash_root(); + self.payloads.lock().put(PayloadCacheId(root), payload) + } + + pub fn pop(&self, root: &Hash256) -> Option> { + self.payloads.lock().pop(&PayloadCacheId(*root)) + } +} diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index e0b1a01b43..7db8e234d1 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -1,7 +1,6 @@ use crate::engine_api::{Error as ApiError, PayloadStatusV1, PayloadStatusV1Status}; use crate::engines::EngineError; -use crate::Error; -use slog::{crit, warn, Logger}; +use slog::{warn, Logger}; use types::ExecutionBlockHash; /// Provides a simpler, easier to parse version of `PayloadStatusV1` for upstream users. 
@@ -19,173 +18,103 @@ pub enum PayloadStatus { InvalidBlockHash { validation_error: Option, }, - InvalidTerminalBlock { - validation_error: Option, - }, } -/// Processes the responses from multiple execution engines, finding the "best" status and returning -/// it (if any). -/// -/// This function has the following basic goals: -/// -/// - Detect a consensus failure between nodes. -/// - Find the most-synced node by preferring a definite response (valid/invalid) over a -/// syncing/accepted response or error. -/// -/// # Details -/// -/// - If there are conflicting valid/invalid responses, always return an error. -/// - If there are syncing/accepted responses but valid/invalid responses exist, return the -/// valid/invalid responses since they're definite. -/// - If there are multiple valid responses, return the first one processed. -/// - If there are multiple invalid responses, return the first one processed. -/// - Syncing/accepted responses are grouped, if there are multiple of them, return the first one -/// processed. -/// - If there are no responses (only errors or nothing), return an error. -pub fn process_multiple_payload_statuses( +/// Processes the response from the execution engine. +pub fn process_payload_status( head_block_hash: ExecutionBlockHash, - statuses: impl Iterator>, + status: Result, log: &Logger, -) -> Result { - let mut errors = vec![]; - let mut valid_statuses = vec![]; - let mut invalid_statuses = vec![]; - let mut other_statuses = vec![]; - - for status in statuses { - match status { - Err(e) => errors.push(e), - Ok(response) => match &response.status { - PayloadStatusV1Status::Valid => { - if response - .latest_valid_hash - .map_or(false, |h| h == head_block_hash) - { - // The response is only valid if `latest_valid_hash` is not `null` and - // equal to the provided `block_hash`. 
- valid_statuses.push(PayloadStatus::Valid) - } else { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: ApiError::BadResponse( - format!( - "new_payload: response.status = VALID but invalid latest_valid_hash. Expected({:?}) Found({:?})", - head_block_hash, - response.latest_valid_hash, - ) - ), - }); - } - } - PayloadStatusV1Status::Invalid => { - if let Some(latest_valid_hash) = response.latest_valid_hash { - // The response is only valid if `latest_valid_hash` is not `null`. - invalid_statuses.push(PayloadStatus::Invalid { - latest_valid_hash, - validation_error: response.validation_error.clone(), - }) - } else { - errors.push(EngineError::Api { - id: "unknown".to_string(), - error: ApiError::BadResponse( - "new_payload: response.status = INVALID but null latest_valid_hash" - .to_string(), - ), - }); - } - } - PayloadStatusV1Status::InvalidBlockHash => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - invalid_statuses.push(PayloadStatus::InvalidBlockHash { - validation_error: response.validation_error.clone(), - }); - } - PayloadStatusV1Status::InvalidTerminalBlock => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - invalid_statuses.push(PayloadStatus::InvalidTerminalBlock { - validation_error: response.validation_error.clone(), - }); - } - PayloadStatusV1Status::Syncing => { - // In the interests of being liberal with what we accept, only raise a - // warning here. 
- if response.latest_valid_hash.is_some() { - warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - other_statuses.push(PayloadStatus::Syncing) - } - PayloadStatusV1Status::Accepted => { - // In the interests of being liberal with what we accept, only raise a - // warning here. - if response.latest_valid_hash.is_some() { - warn!( - log, - "Malformed response from execution engine"; - "msg" => "expected a null latest_valid_hash", - "status" => ?response.status - ) - } - - other_statuses.push(PayloadStatus::Accepted) - } - }, - } - } - - if !valid_statuses.is_empty() && !invalid_statuses.is_empty() { - crit!( - log, - "Consensus failure between execution nodes"; - "invalid_statuses" => ?invalid_statuses, - "valid_statuses" => ?valid_statuses, - ); - - // Choose to exit and ignore the valid response. This preferences correctness over - // liveness. - return Err(Error::ConsensusFailure); - } - - // Log any errors to assist with troubleshooting. - for error in &errors { - warn!( +) -> Result { + match status { + Err(error) => { + warn!( log, "Error whilst processing payload status"; "error" => ?error, - ); - } + ); + Err(error) + } + Ok(response) => match &response.status { + PayloadStatusV1Status::Valid => { + if response + .latest_valid_hash + .map_or(false, |h| h == head_block_hash) + { + // The response is only valid if `latest_valid_hash` is not `null` and + // equal to the provided `block_hash`. + Ok(PayloadStatus::Valid) + } else { + let error = format!( + "new_payload: response.status = VALID but invalid latest_valid_hash. Expected({:?}) Found({:?})", + head_block_hash, + response.latest_valid_hash + ); + Err(EngineError::Api { + error: ApiError::BadResponse(error), + }) + } + } + PayloadStatusV1Status::Invalid => { + if let Some(latest_valid_hash) = response.latest_valid_hash { + // The response is only valid if `latest_valid_hash` is not `null`. 
+ Ok(PayloadStatus::Invalid { + latest_valid_hash, + validation_error: response.validation_error.clone(), + }) + } else { + Err(EngineError::Api { + error: ApiError::BadResponse( + "new_payload: response.status = INVALID but null latest_valid_hash" + .to_string(), + ), + }) + } + } + PayloadStatusV1Status::InvalidBlockHash => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } - valid_statuses - .first() - .or_else(|| invalid_statuses.first()) - .or_else(|| other_statuses.first()) - .cloned() - .map(Result::Ok) - .unwrap_or_else(|| Err(Error::EngineErrors(errors))) + Ok(PayloadStatus::InvalidBlockHash { + validation_error: response.validation_error.clone(), + }) + } + PayloadStatusV1Status::Syncing => { + // In the interests of being liberal with what we accept, only raise a + // warning here. + if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + Ok(PayloadStatus::Syncing) + } + PayloadStatusV1Status::Accepted => { + // In the interests of being liberal with what we accept, only raise a + // warning here. 
+ if response.latest_valid_hash.is_some() { + warn!( + log, + "Malformed response from execution engine"; + "msg" => "expected a null latest_valid_hash", + "status" => ?response.status + ) + } + + Ok(PayloadStatus::Accepted) + } + }, + } } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index b61092cf0e..3620a02dfb 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,10 +1,13 @@ -use crate::engine_api::{ - json_structures::{ - JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, - }, - ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, -}; use crate::engines::ForkChoiceState; +use crate::{ + engine_api::{ + json_structures::{ + JsonForkchoiceUpdatedV1Response, JsonPayloadStatusV1, JsonPayloadStatusV1Status, + }, + ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, + }, + ExecutionBlockWithTransactions, +}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use tree_hash::TreeHash; @@ -57,15 +60,39 @@ impl Block { block_number: block.block_number, parent_hash: block.parent_hash, total_difficulty: block.total_difficulty, + timestamp: block.timestamp, }, Block::PoS(payload) => ExecutionBlock { block_hash: payload.block_hash, block_number: payload.block_number, parent_hash: payload.parent_hash, total_difficulty, + timestamp: payload.timestamp, }, } } + + pub fn as_execution_block_with_tx(&self) -> Option> { + match self { + Block::PoS(payload) => Some(ExecutionBlockWithTransactions { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: 
payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: vec![], + }), + Block::PoW(_) => None, + } + } } #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize, TreeHash)] @@ -75,8 +102,10 @@ pub struct PoWBlock { pub block_hash: ExecutionBlockHash, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, + pub timestamp: u64, } +#[derive(Clone)] pub struct ExecutionBlockGenerator { /* * Common database @@ -153,6 +182,14 @@ impl ExecutionBlockGenerator { .map(|block| block.as_execution_block(self.terminal_total_difficulty)) } + pub fn execution_block_with_txs_by_hash( + &self, + hash: ExecutionBlockHash, + ) -> Option> { + self.block_by_hash(hash) + .and_then(|block| block.as_execution_block_with_tx()) + } + pub fn move_to_block_prior_to_terminal_block(&mut self) -> Result<(), String> { let target_block = self .terminal_block_number @@ -233,6 +270,26 @@ impl ExecutionBlockGenerator { Ok(()) } + pub fn modify_last_block(&mut self, block_modifier: impl FnOnce(&mut Block)) { + if let Some((last_block_hash, block_number)) = + self.block_hashes.keys().max().and_then(|block_number| { + self.block_hashes + .get(block_number) + .map(|block| (block, *block_number)) + }) + { + let mut block = self.blocks.remove(last_block_hash).unwrap(); + block_modifier(&mut block); + // Update the block hash after modifying the block + match &mut block { + Block::PoW(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), + Block::PoS(b) => b.block_hash = ExecutionBlockHash::from_root(b.tree_hash_root()), + } + self.block_hashes.insert(block_number, block.block_hash()); + self.blocks.insert(block.block_hash(), block); + } + } + pub fn get_payload(&mut self, id: &PayloadId) -> Option> { self.payload_ids.get(id).cloned() } @@ -279,7 +336,9 @@ impl 
ExecutionBlockGenerator { } let unknown_head_block_hash = !self.blocks.contains_key(&forkchoice_state.head_block_hash); - let unknown_safe_block_hash = !self.blocks.contains_key(&forkchoice_state.safe_block_hash); + let unknown_safe_block_hash = forkchoice_state.safe_block_hash + != ExecutionBlockHash::zero() + && !self.blocks.contains_key(&forkchoice_state.safe_block_hash); let unknown_finalized_block_hash = forkchoice_state.finalized_block_hash != ExecutionBlockHash::zero() && !self @@ -390,6 +449,7 @@ pub fn generate_pow_block( block_hash: ExecutionBlockHash::zero(), parent_hash, total_difficulty, + timestamp: block_number, }; block.block_hash = ExecutionBlockHash::from_root(block.tree_hash_root()); diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 772ac3c866..975f09fa5e 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -49,12 +49,30 @@ pub async fn handle_rpc( .map_err(|e| format!("unable to parse hash: {:?}", e)) })?; - Ok(serde_json::to_value( - ctx.execution_block_generator - .read() - .execution_block_by_hash(hash), - ) - .unwrap()) + // If we have a static response set, just return that. 
+ if let Some(response) = *ctx.static_get_block_by_hash_response.lock() { + return Ok(serde_json::to_value(response).unwrap()); + } + + let full_tx = params + .get(1) + .and_then(JsonValue::as_bool) + .ok_or_else(|| "missing/invalid params[1] value".to_string())?; + if full_tx { + Ok(serde_json::to_value( + ctx.execution_block_generator + .read() + .execution_block_with_txs_by_hash(hash), + ) + .unwrap()) + } else { + Ok(serde_json::to_value( + ctx.execution_block_generator + .read() + .execution_block_by_hash(hash), + ) + .unwrap()) + } } ENGINE_NEW_PAYLOAD_V1 => { let request: JsonExecutionPayloadV1 = get_param(params, 0)?; @@ -120,6 +138,15 @@ pub async fn handle_rpc( Ok(serde_json::to_value(response).unwrap()) } + ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => { + let block_generator = ctx.execution_block_generator.read(); + let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 { + terminal_total_difficulty: block_generator.terminal_total_difficulty, + terminal_block_hash: block_generator.terminal_block_hash, + terminal_block_number: block_generator.terminal_block_number, + }; + Ok(serde_json::to_value(transition_config).unwrap()) + } other => Err(format!( "The method {} does not exist/is not available", other diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs new file mode 100644 index 0000000000..b8f74c1c93 --- /dev/null +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -0,0 +1,385 @@ +use crate::test_utils::DEFAULT_JWT_SECRET; +use crate::{Config, ExecutionLayer, PayloadAttributes}; +use async_trait::async_trait; +use eth2::types::{BlockId, StateId, ValidatorId}; +use eth2::{BeaconNodeHttpClient, Timeouts}; +use ethereum_consensus::crypto::{SecretKey, Signature}; +use ethereum_consensus::primitives::BlsPublicKey; +pub use ethereum_consensus::state_transition::Context; +use fork_choice::ForkchoiceUpdateParameters; +use mev_build_rs::{ + 
sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, + BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, + ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid, + SignedValidatorRegistration, +}; +use parking_lot::RwLock; +use sensitive_url::SensitiveUrl; +use ssz::{Decode, Encode}; +use ssz_rs::{Merkleized, SimpleSerialize}; +use std::collections::HashMap; +use std::fmt::Debug; +use std::net::Ipv4Addr; +use std::sync::Arc; +use std::time::Duration; +use task_executor::TaskExecutor; +use tempfile::NamedTempFile; +use tree_hash::TreeHash; +use types::{ + Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, Hash256, Slot, Uint256, +}; + +#[derive(Clone)] +pub enum Operation { + FeeRecipient(Address), + GasLimit(usize), + Value(Uint256), + ParentHash(Hash256), + PrevRandao(Hash256), + BlockNumber(usize), + Timestamp(usize), +} + +impl Operation { + fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + match self { + Operation::FeeRecipient(fee_recipient) => { + bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? 
+ } + Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, + Operation::Value(value) => bid.value = to_ssz_rs(&value)?, + Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, + Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, + Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, + Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64, + } + Ok(()) + } +} + +pub struct TestingBuilder { + server: BlindedBlockProviderServer>, + pub builder: MockBuilder, +} + +impl TestingBuilder { + pub fn new( + mock_el_url: SensitiveUrl, + builder_url: SensitiveUrl, + beacon_url: SensitiveUrl, + spec: ChainSpec, + executor: TaskExecutor, + ) -> Self { + let file = NamedTempFile::new().unwrap(); + let path = file.path().into(); + std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); + + // This EL should not talk to a builder + let config = Config { + execution_endpoints: vec![mock_el_url], + secret_files: vec![path], + suggested_fee_recipient: None, + ..Default::default() + }; + + let el = + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); + + // This should probably be done for all fields, we only update ones we are testing with so far. 
+ let mut context = Context::for_mainnet(); + context.terminal_total_difficulty = to_ssz_rs(&spec.terminal_total_difficulty).unwrap(); + context.terminal_block_hash = to_ssz_rs(&spec.terminal_block_hash).unwrap(); + context.terminal_block_hash_activation_epoch = + to_ssz_rs(&spec.terminal_block_hash_activation_epoch).unwrap(); + + let builder = MockBuilder::new( + el, + BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), + spec, + context, + ); + let port = builder_url.full.port().unwrap(); + let host: Ipv4Addr = builder_url + .full + .host_str() + .unwrap() + .to_string() + .parse() + .unwrap(); + let server = BlindedBlockProviderServer::new(host, port, builder.clone()); + Self { server, builder } + } + + pub async fn run(&self) { + self.server.run().await + } +} + +#[derive(Clone)] +pub struct MockBuilder { + el: ExecutionLayer, + beacon_client: BeaconNodeHttpClient, + spec: ChainSpec, + context: Arc, + val_registration_cache: Arc>>, + builder_sk: SecretKey, + operations: Arc>>, + invalidate_signatures: Arc>, +} + +impl MockBuilder { + pub fn new( + el: ExecutionLayer, + beacon_client: BeaconNodeHttpClient, + spec: ChainSpec, + context: Context, + ) -> Self { + let sk = SecretKey::random(&mut rand::thread_rng()).unwrap(); + Self { + el, + beacon_client, + // Should keep spec and context consistent somehow + spec, + context: Arc::new(context), + val_registration_cache: Arc::new(RwLock::new(HashMap::new())), + builder_sk: sk, + operations: Arc::new(RwLock::new(vec![])), + invalidate_signatures: Arc::new(RwLock::new(false)), + } + } + + pub fn add_operation(&self, op: Operation) { + // Insert operations at the front of the vec to make sure `apply_operations` applies them + // in the order they are added. 
+ self.operations.write().insert(0, op); + } + + pub fn invalid_signatures(&self) { + *self.invalidate_signatures.write() = true; + } + + pub fn valid_signatures(&mut self) { + *self.invalidate_signatures.write() = false; + } + + fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + let mut guard = self.operations.write(); + while let Some(op) = guard.pop() { + op.apply(bid)?; + } + Ok(()) + } +} + +#[async_trait] +impl mev_build_rs::BlindedBlockProvider for MockBuilder { + async fn register_validators( + &self, + registrations: &mut [SignedValidatorRegistration], + ) -> Result<(), BlindedBlockProviderError> { + for registration in registrations { + let pubkey = registration.message.public_key.clone(); + let message = &mut registration.message; + verify_signed_builder_message( + message, + ®istration.signature, + &pubkey, + &self.context, + )?; + self.val_registration_cache.write().insert( + registration.message.public_key.clone(), + registration.clone(), + ); + } + + Ok(()) + } + + async fn fetch_best_bid( + &self, + bid_request: &BidRequest, + ) -> Result { + let slot = Slot::new(bid_request.slot); + let signed_cached_data = self + .val_registration_cache + .read() + .get(&bid_request.public_key) + .ok_or_else(|| convert_err("missing registration"))? + .clone(); + let cached_data = signed_cached_data.message; + + let head = self + .beacon_client + .get_beacon_blocks::(BlockId::Head) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing head block"))?; + + let block = head.data.message_merge().map_err(convert_err)?; + let head_block_root = block.tree_hash_root(); + let head_execution_hash = block.body.execution_payload.execution_payload.block_hash; + if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? 
{ + return Err(BlindedBlockProviderError::Custom(format!( + "head mismatch: {} {}", + head_execution_hash, bid_request.parent_hash + ))); + } + + let finalized_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Finalized) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing finalized block"))? + .data + .message_merge() + .map_err(convert_err)? + .body + .execution_payload + .execution_payload + .block_hash; + + let justified_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Justified) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing finalized block"))? + .data + .message_merge() + .map_err(convert_err)? + .body + .execution_payload + .execution_payload + .block_hash; + + let val_index = self + .beacon_client + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(from_ssz_rs(&cached_data.public_key)?), + ) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing validator from state"))? + .data + .index; + let fee_recipient = from_ssz_rs(&cached_data.fee_recipient)?; + let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); + + let genesis_time = self + .beacon_client + .get_beacon_genesis() + .await + .map_err(convert_err)? + .data + .genesis_time; + let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; + + let head_state: BeaconState = self + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(convert_err)? + .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? 
+ .data; + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(convert_err)?; + + let payload_attributes = PayloadAttributes { + timestamp, + prev_randao: *prev_randao, + suggested_fee_recipient: fee_recipient, + }; + + self.el + .insert_proposer(slot, head_block_root, val_index, payload_attributes) + .await; + + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: Hash256::zero(), + head_hash: None, + justified_hash: Some(justified_execution_hash), + finalized_hash: Some(finalized_execution_hash), + }; + + let payload = self + .el + .get_full_payload_caching::>( + head_execution_hash, + timestamp, + *prev_randao, + fee_recipient, + forkchoice_update_params, + ) + .await + .map_err(convert_err)? + .to_execution_payload_header(); + + let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; + let mut header: ServerPayloadHeader = + serde_json::from_str(json_payload.as_str()).map_err(convert_err)?; + + header.gas_limit = cached_data.gas_limit; + + let mut message = BuilderBid { + header, + value: ssz_rs::U256::default(), + public_key: self.builder_sk.public_key(), + }; + + self.apply_operations(&mut message)?; + + let mut signature = + sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?; + + if *self.invalidate_signatures.read() { + signature = Signature::default(); + } + + let signed_bid = SignedBuilderBid { message, signature }; + Ok(signed_bid) + } + + async fn open_bid( + &self, + signed_block: &mut SignedBlindedBeaconBlock, + ) -> Result { + let payload = self + .el + .get_payload_by_root(&from_ssz_rs( + &signed_block + .message + .body + .execution_payload_header + .hash_tree_root() + .map_err(convert_err)?, + )?) 
+ .ok_or_else(|| convert_err("missing payload for tx root"))?; + + let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; + serde_json::from_str(json_payload.as_str()).map_err(convert_err) + } +} + +pub fn from_ssz_rs( + ssz_rs_data: &T, +) -> Result { + U::from_ssz_bytes( + ssz_rs::serialize(ssz_rs_data) + .map_err(convert_err)? + .as_ref(), + ) + .map_err(convert_err) +} + +pub fn to_ssz_rs( + ssz_data: &T, +) -> Result { + ssz_rs::deserialize::(&ssz_data.as_ssz_bytes()).map_err(convert_err) +} + +fn convert_err(e: E) -> BlindedBlockProviderError { + BlindedBlockProviderError::Custom(format!("{e:?}")) +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 5770a8a382..065abc9360 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -1,15 +1,19 @@ use crate::{ - test_utils::{MockServer, DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY, JWT_SECRET}, + test_utils::{ + MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, + DEFAULT_TERMINAL_DIFFICULTY, + }, Config, *, }; use sensitive_url::SensitiveUrl; use task_executor::TaskExecutor; use tempfile::NamedTempFile; +use tree_hash::TreeHash; use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; pub struct MockExecutionLayer { pub server: MockServer, - pub el: ExecutionLayer, + pub el: ExecutionLayer, pub executor: TaskExecutor, pub spec: ChainSpec, } @@ -22,6 +26,8 @@ impl MockExecutionLayer { DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), Epoch::new(0), + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + None, ) } @@ -31,6 +37,8 @@ impl MockExecutionLayer { terminal_block: u64, terminal_block_hash: ExecutionBlockHash, terminal_block_hash_activation_epoch: Epoch, + jwt_key: Option, + builder_url: Option, ) -> Self { let handle 
= executor.handle().unwrap(); @@ -39,8 +47,10 @@ impl MockExecutionLayer { spec.terminal_block_hash = terminal_block_hash; spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch; + let jwt_key = jwt_key.unwrap_or_else(JwtKey::random); let server = MockServer::new( &handle, + jwt_key, terminal_total_difficulty, terminal_block, terminal_block_hash, @@ -50,12 +60,14 @@ impl MockExecutionLayer { let file = NamedTempFile::new().unwrap(); let path = file.path().into(); - std::fs::write(&path, hex::encode(JWT_SECRET)).unwrap(); + std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); let config = Config { execution_endpoints: vec![url], + builder_url, secret_files: vec![path], suggested_fee_recipient: Some(Address::repeat_byte(42)), + builder_profit_threshold: DEFAULT_BUILDER_THRESHOLD_WEI, ..Default::default() }; let el = @@ -79,11 +91,16 @@ impl MockExecutionLayer { let block_number = latest_execution_block.block_number() + 1; let timestamp = block_number; let prev_randao = Hash256::from_low_u64_be(block_number); - let finalized_block_hash = parent_hash; + let head_block_root = Hash256::repeat_byte(42); + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: head_block_root, + head_hash: Some(parent_hash), + justified_hash: None, + finalized_hash: None, + }; // Insert a proposer to ensure the fork choice updated command works. 
let slot = Slot::new(0); - let head_block_root = Hash256::repeat_byte(42); let validator_index = 0; self.el .insert_proposer( @@ -102,6 +119,7 @@ impl MockExecutionLayer { .notify_forkchoice_updated( parent_hash, ExecutionBlockHash::zero(), + ExecutionBlockHash::zero(), slot, head_block_root, ) @@ -109,14 +127,21 @@ impl MockExecutionLayer { .unwrap(); let validator_index = 0; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot, + chain_health: ChainHealth::Healthy, + }; let payload = self .el - .get_payload::>( + .get_payload::>( parent_hash, timestamp, prev_randao, - finalized_block_hash, validator_index, + forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() @@ -127,6 +152,43 @@ impl MockExecutionLayer { assert_eq!(payload.timestamp, timestamp); assert_eq!(payload.prev_randao, prev_randao); + // Ensure the payload cache is empty. + assert!(self + .el + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot, + chain_health: ChainHealth::Healthy, + }; + let payload_header = self + .el + .get_payload::>( + parent_hash, + timestamp, + prev_randao, + validator_index, + forkchoice_update_params, + builder_params, + &self.spec, + ) + .await + .unwrap() + .execution_payload_header; + assert_eq!(payload_header.block_hash, block_hash); + assert_eq!(payload_header.parent_hash, parent_hash); + assert_eq!(payload_header.block_number, block_number); + assert_eq!(payload_header.timestamp, timestamp); + assert_eq!(payload_header.prev_randao, prev_randao); + + // Ensure the payload cache has the correct payload. 
+ assert_eq!( + self.el + .get_payload_by_root(&payload_header.tree_hash_root()), + Some(payload.clone()) + ); + let status = self.el.notify_new_payload(&payload).await.unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -137,6 +199,7 @@ impl MockExecutionLayer { .notify_forkchoice_updated( block_hash, ExecutionBlockHash::zero(), + ExecutionBlockHash::zero(), slot, head_block_root, ) @@ -173,7 +236,7 @@ impl MockExecutionLayer { pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self where - U: Fn(ChainSpec, ExecutionLayer, Option) -> V, + U: Fn(ChainSpec, ExecutionLayer, Option) -> V, V: Future, { let terminal_block_number = self diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 805f6716fb..aaeea8aa5a 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -2,11 +2,11 @@ use crate::engine_api::auth::JwtKey; use crate::engine_api::{ - auth::Auth, http::JSONRPC_VERSION, PayloadStatusV1, PayloadStatusV1Status, + auth::Auth, http::JSONRPC_VERSION, ExecutionBlock, PayloadStatusV1, PayloadStatusV1Status, }; use bytes::Bytes; use environment::null_logger; -use execution_block_generator::{Block, PoWBlock}; +use execution_block_generator::PoWBlock; use handle_rpc::handle_rpc; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use serde::{Deserialize, Serialize}; @@ -21,17 +21,41 @@ use tokio::{runtime, sync::oneshot}; use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; -pub use execution_block_generator::{generate_pow_block, ExecutionBlockGenerator}; +pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; +pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; pub const DEFAULT_TERMINAL_BLOCK: u64 = 64; 
-pub const JWT_SECRET: [u8; 32] = [42; 32]; +pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; +pub const DEFAULT_BUILDER_THRESHOLD_WEI: u128 = 1_000_000_000_000_000_000; mod execution_block_generator; mod handle_rpc; +mod mock_builder; mod mock_execution_layer; +/// Configuration for the MockExecutionLayer. +pub struct MockExecutionConfig { + pub server_config: Config, + pub jwt_key: JwtKey, + pub terminal_difficulty: Uint256, + pub terminal_block: u64, + pub terminal_block_hash: ExecutionBlockHash, +} + +impl Default for MockExecutionConfig { + fn default() -> Self { + Self { + jwt_key: JwtKey::random(), + terminal_difficulty: DEFAULT_TERMINAL_DIFFICULTY.into(), + terminal_block: DEFAULT_TERMINAL_BLOCK, + terminal_block_hash: ExecutionBlockHash::zero(), + server_config: Config::default(), + } + } +} + pub struct MockServer { _shutdown_tx: oneshot::Sender<()>, listen_socket_addr: SocketAddr, @@ -43,25 +67,29 @@ impl MockServer { pub fn unit_testing() -> Self { Self::new( &runtime::Handle::current(), + JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), ) } - pub fn new( - handle: &runtime::Handle, - terminal_difficulty: Uint256, - terminal_block: u64, - terminal_block_hash: ExecutionBlockHash, - ) -> Self { + pub fn new_with_config(handle: &runtime::Handle, config: MockExecutionConfig) -> Self { + let MockExecutionConfig { + jwt_key, + terminal_difficulty, + terminal_block, + terminal_block_hash, + server_config, + } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); let execution_block_generator = ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash); let ctx: Arc> = Arc::new(Context { - config: <_>::default(), + config: server_config, + jwt_key, log: null_logger().unwrap(), last_echo_request: last_echo_request.clone(), execution_block_generator: 
RwLock::new(execution_block_generator), @@ -69,6 +97,7 @@ impl MockServer { preloaded_responses, static_new_payload_response: <_>::default(), static_forkchoice_updated_response: <_>::default(), + static_get_block_by_hash_response: <_>::default(), _phantom: PhantomData, }); @@ -99,6 +128,25 @@ impl MockServer { } } + pub fn new( + handle: &runtime::Handle, + jwt_key: JwtKey, + terminal_difficulty: Uint256, + terminal_block: u64, + terminal_block_hash: ExecutionBlockHash, + ) -> Self { + Self::new_with_config( + handle, + MockExecutionConfig { + server_config: Config::default(), + jwt_key, + terminal_difficulty, + terminal_block, + terminal_block_hash, + }, + ) + } + pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator> { self.ctx.execution_block_generator.write() } @@ -198,8 +246,8 @@ impl MockServer { fn invalid_terminal_block_status() -> PayloadStatusV1 { PayloadStatusV1 { - status: PayloadStatusV1Status::InvalidTerminalBlock, - latest_valid_hash: None, + status: PayloadStatusV1Status::Invalid, + latest_valid_hash: Some(ExecutionBlockHash::zero()), validation_error: Some("static response".into()), } } @@ -271,6 +319,16 @@ impl MockServer { self.set_forkchoice_updated_response(Self::invalid_terminal_block_status()); } + /// This will make the node appear like it is syncing. + pub fn all_get_block_by_hash_requests_return_none(&self) { + *self.ctx.static_get_block_by_hash_response.lock() = Some(None); + } + + /// The node will respond "naturally"; it will return blocks if they're known to it. + pub fn all_get_block_by_hash_requests_return_natural_value(&self) { + *self.ctx.static_get_block_by_hash_response.lock() = None; + } + /// Disables any static payload responses so the execution block generator will do its own /// verification. 
pub fn full_payload_verification(&self) { @@ -290,6 +348,7 @@ impl MockServer { block_hash, parent_hash, total_difficulty, + timestamp: block_number, }); self.ctx @@ -351,6 +410,7 @@ impl warp::reject::Reject for AuthError {} /// The server will gracefully handle the case where any fields are `None`. pub struct Context { pub config: Config, + pub jwt_key: JwtKey, pub log: Logger, pub last_echo_request: Arc>>, pub execution_block_generator: RwLock>, @@ -358,6 +418,7 @@ pub struct Context { pub previous_request: Arc>>, pub static_new_payload_response: Arc>>, pub static_forkchoice_updated_response: Arc>>, + pub static_get_block_by_hash_response: Arc>>>, pub _phantom: PhantomData, } @@ -386,28 +447,30 @@ struct ErrorMessage { /// Returns a `warp` header which filters out request that has a missing or incorrectly /// signed JWT token. -fn auth_header_filter() -> warp::filters::BoxedFilter<()> { +fn auth_header_filter(jwt_key: JwtKey) -> warp::filters::BoxedFilter<()> { warp::any() .and(warp::filters::header::optional("Authorization")) - .and_then(move |authorization: Option| async move { - match authorization { - None => Err(warp::reject::custom(AuthError( - "auth absent from request".to_string(), - ))), - Some(auth) => { - if let Some(token) = auth.strip_prefix("Bearer ") { - let secret = JwtKey::from_slice(&JWT_SECRET).unwrap(); - match Auth::validate_token(token, &secret) { - Ok(_) => Ok(()), - Err(e) => Err(warp::reject::custom(AuthError(format!( - "Auth failure: {:?}", - e - )))), + .and_then(move |authorization: Option| { + let secret = jwt_key.clone(); + async move { + match authorization { + None => Err(warp::reject::custom(AuthError( + "auth absent from request".to_string(), + ))), + Some(auth) => { + if let Some(token) = auth.strip_prefix("Bearer ") { + match Auth::validate_token(token, &secret) { + Ok(_) => Ok(()), + Err(e) => Err(warp::reject::custom(AuthError(format!( + "Auth failure: {:?}", + e + )))), + } + } else { + Err(warp::reject::custom(AuthError( + 
"Bearer token not present in auth header".to_string(), + ))) } - } else { - Err(warp::reject::custom(AuthError( - "Bearer token not present in auth header".to_string(), - ))) } } } @@ -523,7 +586,7 @@ pub fn serve( }); let routes = warp::post() - .and(auth_header_filter()) + .and(auth_header_filter(ctx.jwt_key.clone())) .and(root.or(echo)) .recover(handle_rejection) // Add a `Server` header. diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index aac13a324f..089f79aa11 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -112,7 +112,7 @@ impl Eth1GenesisService { "Importing eth1 deposit logs"; ); - let endpoints = eth1_service.init_endpoints(); + let endpoints = eth1_service.init_endpoints()?; loop { let update_result = eth1_service diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index ccf8fe10c9..1233d99fd3 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -3,6 +3,7 @@ mod eth1_genesis_service; mod interop; pub use eth1::Config as Eth1Config; +pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use interop::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index 8b77c89471..74a054fcc0 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -4,7 +4,7 @@ //! dir in the root of the `lighthouse` repo. 
#![cfg(test)] use environment::{Environment, EnvironmentBuilder}; -use eth1::{DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; +use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance}; use genesis::{Eth1Config, Eth1GenesisService}; use sensitive_url::SensitiveUrl; @@ -29,7 +29,7 @@ fn basic() { let mut spec = env.eth2_config().spec.clone(); env.runtime().block_on(async { - let eth1 = GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()) + let eth1 = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()) .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; @@ -44,7 +44,10 @@ fn basic() { let service = Eth1GenesisService::new( Eth1Config { - endpoints: vec![SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap()], + endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse( + eth1.endpoint().as_str(), + ) + .unwrap()]), deposit_contract_address: deposit_contract.address(), deposit_contract_deploy_block: now, lowest_cached_block_number: now, diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index a34618c2ef..fedd66c540 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -31,14 +31,17 @@ execution_layer = {path = "../execution_layer"} parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } - +lru = "0.7.7" +tree_hash = "0.4.1" [dev-dependencies] store = { path = "../store" } environment = { path = "../../lighthouse/environment" } -tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } logging = { path = "../../common/logging" } +serde_json = "1.0.58" +proto_array = { path = "../../consensus/proto_array" } +unused_port = {path = "../../common/unused_port"} [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 
2b4543656d..ca68d4d04c 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -83,6 +83,10 @@ pub fn get_attestation_performance( } // Either use the global validator set, or the specified index. + // + // Does no further validation of the indices, so in the event an index has not yet been + // activated or does not yet exist (according to the head state), it will return all fields as + // `false`. let index_range = if target.to_lowercase() == "global" { chain .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 4503385637..4878ef60d8 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -56,15 +56,19 @@ fn cached_attestation_duties( request_indices: &[u64], chain: &BeaconChain, ) -> Result { - let head = chain - .head_info() + let head_block_root = chain.canonical_head.cached_head().head_block_root(); + + let (duties, dependent_root, execution_status) = chain + .validator_attestation_duties(request_indices, request_epoch, head_block_root) .map_err(warp_utils::reject::beacon_chain_error)?; - let (duties, dependent_root) = chain - .validator_attestation_duties(request_indices, request_epoch, head.block_root) - .map_err(warp_utils::reject::beacon_chain_error)?; - - convert_to_api_response(duties, request_indices, dependent_root, chain) + convert_to_api_response( + duties, + request_indices, + dependent_root, + execution_status.is_optimistic_or_invalid(), + chain, + ) } /// Compute some attester duties by reading a `BeaconState` from disk, completely ignoring the @@ -76,31 +80,41 @@ fn compute_historic_attester_duties( ) -> Result { // If the head is quite old then it might still be relevant for a historical request. 
// - // Use the `with_head` function to read & clone in a single call to avoid race conditions. - let state_opt = chain - .with_head(|head| { - if head.beacon_state.current_epoch() <= request_epoch { - Ok(Some((head.beacon_state_root(), head.beacon_state.clone()))) - } else { - Ok(None) - } - }) - .map_err(warp_utils::reject::beacon_chain_error)?; + // Avoid holding the `cached_head` longer than necessary. + let state_opt = { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + let head = &cached_head.snapshot; - let mut state = if let Some((state_root, mut state)) = state_opt { - // If we've loaded the head state it might be from a previous epoch, ensure it's in a - // suitable epoch. - ensure_state_knows_attester_duties_for_epoch( - &mut state, - state_root, - request_epoch, - &chain.spec, - )?; - state - } else { - StateId::slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + if head.beacon_state.current_epoch() <= request_epoch { + Some(( + head.beacon_state_root(), + head.beacon_state.clone(), + execution_status.is_optimistic_or_invalid(), + )) + } else { + None + } }; + let (mut state, execution_optimistic) = + if let Some((state_root, mut state, execution_optimistic)) = state_opt { + // If we've loaded the head state it might be from a previous epoch, ensure it's in a + // suitable epoch. + ensure_state_knows_attester_duties_for_epoch( + &mut state, + state_root, + request_epoch, + &chain.spec, + )?; + (state, execution_optimistic) + } else { + StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(chain)? + }; + // Sanity-check the state lookup. 
if !(state.current_epoch() == request_epoch || state.current_epoch() + 1 == request_epoch) { return Err(warp_utils::reject::custom_server_error(format!( @@ -136,7 +150,13 @@ fn compute_historic_attester_duties( .collect::>() .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(duties, request_indices, dependent_root, chain) + convert_to_api_response( + duties, + request_indices, + dependent_root, + execution_optimistic, + chain, + ) } fn ensure_state_knows_attester_duties_for_epoch( @@ -174,6 +194,7 @@ fn convert_to_api_response( duties: Vec>, indices: &[u64], dependent_root: Hash256, + execution_optimistic: bool, chain: &BeaconChain, ) -> Result { // Protect against an inconsistent slot clock. @@ -209,6 +230,7 @@ fn convert_to_api_response( Ok(api_types::DutiesResponse { dependent_root, + execution_optimistic: Some(execution_optimistic), data, }) } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 727215bfca..5c785fe651 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,7 +1,10 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes, WhenSlotSkipped}; +use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlockId as CoreBlockId; +use std::fmt; use std::str::FromStr; -use types::{BlindedPayload, Hash256, SignedBeaconBlock, Slot}; +use std::sync::Arc; +use types::{Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot}; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. 
@@ -21,33 +24,78 @@ impl BlockId { pub fn root( &self, chain: &BeaconChain, - ) -> Result { + ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { match &self.0 { - CoreBlockId::Head => chain - .head_info() - .map(|head| head.block_root) - .map_err(warp_utils::reject::beacon_chain_error), - CoreBlockId::Genesis => Ok(chain.genesis_block_root), - CoreBlockId::Finalized => chain - .head_info() - .map(|head| head.finalized_checkpoint.root) - .map_err(warp_utils::reject::beacon_chain_error), - CoreBlockId::Justified => chain - .head_info() - .map(|head| head.current_justified_checkpoint.root) - .map_err(warp_utils::reject::beacon_chain_error), - CoreBlockId::Slot(slot) => chain - .block_root_at_slot(*slot, WhenSlotSkipped::None) - .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|root_opt| { - root_opt.ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "beacon block at slot {}", - slot - )) - }) - }), - CoreBlockId::Root(root) => Ok(*root), + CoreBlockId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok(( + cached_head.head_block_root(), + execution_status.is_optimistic_or_invalid(), + )) + } + CoreBlockId::Genesis => Ok((chain.genesis_block_root, false)), + CoreBlockId::Finalized => { + let finalized_checkpoint = + chain.canonical_head.cached_head().finalized_checkpoint(); + let (_slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?; + Ok((finalized_checkpoint.root, execution_optimistic)) + } + CoreBlockId::Justified => { + let justified_checkpoint = + chain.canonical_head.cached_head().justified_checkpoint(); + let (_slot, execution_optimistic) = + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?; + Ok((justified_checkpoint.root, execution_optimistic)) + } + CoreBlockId::Slot(slot) => { + let execution_optimistic = chain + 
.is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + let root = chain + .block_root_at_slot(*slot, WhenSlotSkipped::None) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block at slot {}", + slot + )) + }) + })?; + Ok((root, execution_optimistic)) + } + CoreBlockId::Root(root) => { + // This matches the behaviour of other consensus clients (e.g. Teku). + if root == &Hash256::zero() { + return Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))); + }; + if chain + .store + .block_exists(root) + .map_err(BeaconChainError::DBError) + .map_err(warp_utils::reject::beacon_chain_error)? + { + let execution_optimistic = chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok((*root, execution_optimistic)) + } else { + Err(warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + ))) + } + } } } @@ -55,14 +103,20 @@ impl BlockId { pub fn blinded_block( &self, chain: &BeaconChain, - ) -> Result>, warp::Rejection> { + ) -> Result<(SignedBlindedBeaconBlock, ExecutionOptimistic), warp::Rejection> { match &self.0 { - CoreBlockId::Head => chain - .head_beacon_block() - .map(Into::into) - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok(( + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + )) + } CoreBlockId::Slot(slot) => { - let root = self.root(chain)?; + let (root, execution_optimistic) = self.root(chain)?; chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) @@ -74,7 
+128,7 @@ impl BlockId { slot ))); } - Ok(block) + Ok((block, execution_optimistic)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -83,8 +137,8 @@ impl BlockId { }) } _ => { - let root = self.root(chain)?; - chain + let (root, execution_optimistic) = self.root(chain)?; + let block = chain .get_blinded_block(&root) .map_err(warp_utils::reject::beacon_chain_error) .and_then(|root_opt| { @@ -94,7 +148,8 @@ impl BlockId { root )) }) - }) + })?; + Ok((block, execution_optimistic)) } } } @@ -103,13 +158,20 @@ impl BlockId { pub async fn full_block( &self, chain: &BeaconChain, - ) -> Result, warp::Rejection> { + ) -> Result<(Arc>, ExecutionOptimistic), warp::Rejection> { match &self.0 { - CoreBlockId::Head => chain - .head_beacon_block() - .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Head => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + Ok(( + cached_head.snapshot.beacon_block.clone(), + execution_status.is_optimistic_or_invalid(), + )) + } CoreBlockId::Slot(slot) => { - let root = self.root(chain)?; + let (root, execution_optimistic) = self.root(chain)?; chain .get_block(&root) .await @@ -122,7 +184,7 @@ impl BlockId { slot ))); } - Ok(block) + Ok((Arc::new(block), execution_optimistic)) } None => Err(warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -131,18 +193,20 @@ impl BlockId { }) } _ => { - let root = self.root(chain)?; + let (root, execution_optimistic) = self.root(chain)?; chain .get_block(&root) .await .map_err(warp_utils::reject::beacon_chain_error) - .and_then(|root_opt| { - root_opt.ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "beacon block with root {}", - root - )) - }) + .and_then(|block_opt| { + block_opt + .map(|block| (Arc::new(block), execution_optimistic)) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon 
block with root {}", + root + )) + }) }) } } @@ -156,3 +220,9 @@ impl FromStr for BlockId { CoreBlockId::from_str(s).map(Self) } } + +impl fmt::Display for BlockId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 154773aa95..3b81b894db 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -1,10 +1,17 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; -use slog::{warn, Logger}; +use lru::LruCache; +use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; use std::sync::Arc; -use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; +use types::BeaconBlock; +use warp_utils::reject::{ + beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, +}; +const STATE_CACHE_SIZE: usize = 2; + +/// Fetch block rewards for blocks from the canonical chain. pub fn get_block_rewards( query: BlockRewardsQuery, chain: Arc>, @@ -45,13 +52,21 @@ pub fn get_block_rewards( .build_all_caches(&chain.spec) .map_err(beacon_state_error)?; + let mut reward_cache = Default::default(); let mut block_rewards = Vec::with_capacity(blocks.len()); let block_replayer = BlockReplayer::new(state, &chain.spec) .pre_block_hook(Box::new(|state, block| { + state.build_all_committee_caches(&chain.spec)?; + // Compute block reward. - let block_reward = - chain.compute_block_reward(block.message(), block.canonical_root(), state)?; + let block_reward = chain.compute_block_reward( + block.message(), + block.canonical_root(), + state, + &mut reward_cache, + query.include_attestations, + )?; block_rewards.push(block_reward); Ok(()) })) @@ -78,3 +93,96 @@ pub fn get_block_rewards( Ok(block_rewards) } + +/// Compute block rewards for blocks passed in as input. 
+pub fn compute_block_rewards( + blocks: Vec>, + chain: Arc>, + log: Logger, +) -> Result, warp::Rejection> { + let mut block_rewards = Vec::with_capacity(blocks.len()); + let mut state_cache = LruCache::new(STATE_CACHE_SIZE); + let mut reward_cache = Default::default(); + + for block in blocks { + let parent_root = block.parent_root(); + + // Check LRU cache for a constructed state from a previous iteration. + let state = if let Some(state) = state_cache.get(&(parent_root, block.slot())) { + debug!( + log, + "Re-using cached state for block rewards"; + "parent_root" => ?parent_root, + "slot" => block.slot(), + ); + state + } else { + debug!( + log, + "Fetching state for block rewards"; + "parent_root" => ?parent_root, + "slot" => block.slot() + ); + let parent_block = chain + .get_blinded_block(&parent_root) + .map_err(beacon_chain_error)? + .ok_or_else(|| { + custom_bad_request(format!( + "parent block not known or not canonical: {:?}", + parent_root + )) + })?; + + let parent_state = chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .map_err(beacon_chain_error)? + .ok_or_else(|| { + custom_bad_request(format!( + "no state known for parent block: {:?}", + parent_root + )) + })?; + + let block_replayer = BlockReplayer::new(parent_state, &chain.spec) + .no_signature_verification() + .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) + .minimal_block_root_verification() + .apply_blocks(vec![], Some(block.slot())) + .map_err(beacon_chain_error)?; + + if block_replayer.state_root_miss() { + warn!( + log, + "Block reward state root miss"; + "parent_slot" => parent_block.slot(), + "slot" => block.slot(), + ); + } + + let mut state = block_replayer.into_state(); + state + .build_all_committee_caches(&chain.spec) + .map_err(beacon_state_error)?; + + state_cache + .get_or_insert((parent_root, block.slot()), || state) + .ok_or_else(|| { + custom_server_error("LRU cache insert should always succeed".into()) + })? 
+ }; + + // Compute block reward. + let block_reward = chain + .compute_block_reward( + block.to_ref(), + block.canonical_root(), + state, + &mut reward_cache, + true, + ) + .map_err(beacon_chain_error)?; + block_rewards.push(block_reward); + } + + Ok(block_rewards) +} diff --git a/beacon_node/http_api/src/database.rs b/beacon_node/http_api/src/database.rs index 014db8a602..645c19c40e 100644 --- a/beacon_node/http_api/src/database.rs +++ b/beacon_node/http_api/src/database.rs @@ -22,7 +22,7 @@ pub fn info( pub fn historical_blocks( chain: Arc>, - blocks: Vec>, + blocks: Vec>>, ) -> Result { chain .import_historical_block_batch(blocks) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 166ec9147f..0cb9c056bf 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,28 +13,27 @@ mod block_rewards; mod database; mod metrics; mod proposer_duties; +mod publish_blocks; mod state_id; mod sync_committees; mod validator_inclusion; mod version; use beacon_chain::{ - attestation_verification::VerifiedAttestation, - observed_operations::ObservationOutcome, - validator_monitor::{get_block_delay_ms, timestamp_now}, - AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - HeadSafetyStatus, ProduceBlockVerification, WhenSlotSkipped, + attestation_verification::VerifiedAttestation, observed_operations::ObservationOutcome, + validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, + BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; -use block_id::BlockId; -use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; +pub use block_id::BlockId; +use eth2::types::{self as api_types, EndpointVersion, ValidatorId, ValidatorStatus}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; -use network::NetworkMessage; +use network::{NetworkMessage, 
NetworkSenders, ValidatorSubscriptionMessage}; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -use state_id::StateId; +pub use state_id::StateId; use std::borrow::Cow; use std::convert::TryInto; use std::future::Future; @@ -42,19 +41,19 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; use std::pin::Pin; use std::sync::Arc; -use tokio::sync::mpsc::UnboundedSender; +use tokio::sync::mpsc::{Sender, UnboundedSender}; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttesterSlashing, BeaconBlockBodyMerge, BeaconBlockMerge, BeaconStateError, - BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, + Attestation, AttestationData, AttesterSlashing, BeaconStateError, BlindedPayload, + CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, - SignedContributionAndProof, SignedVoluntaryExit, Slot, SyncCommitteeMessage, + SignedBeaconBlock, SignedBlindedBeaconBlock, SignedContributionAndProof, + SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; use version::{ - add_consensus_version_header, fork_versioned_response, inconsistent_fork_rejection, - unsupported_version_rejection, V1, + add_consensus_version_header, execution_optimistic_fork_versioned_response, + fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2, }; use warp::http::StatusCode; use warp::sse::Event; @@ -77,6 +76,9 @@ const SYNC_TOLERANCE_EPOCHS: u64 = 8; /// A custom type which allows for both unsecured and TLS-enabled HTTP servers. type HttpServer = (SocketAddr, Pin + Send>>); +/// Alias for readability. 
+pub type ExecutionOptimistic = bool; + /// Configuration used when serving the HTTP server over TLS. #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] pub struct TlsConfig { @@ -90,7 +92,7 @@ pub struct TlsConfig { pub struct Context { pub config: Config, pub chain: Option>>, - pub network_tx: Option>>, + pub network_senders: Option>, pub network_globals: Option>>, pub eth1_service: Option, pub log: Logger, @@ -103,9 +105,9 @@ pub struct Config { pub listen_addr: IpAddr, pub listen_port: u16, pub allow_origin: Option, - pub serve_legacy_spec: bool, pub tls_config: Option, pub allow_sync_stalled: bool, + pub spec_fork_name: Option, } impl Default for Config { @@ -115,9 +117,9 @@ impl Default for Config { listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: 5052, allow_origin: None, - serve_legacy_spec: true, tls_config: None, allow_sync_stalled: false, + spec_fork_name: None, } } } @@ -200,18 +202,29 @@ pub fn prometheus_metrics() -> warp::filters::log::Log( .untuple_one() }; - let eth1_v1 = single_version(V1); + let eth_v1 = single_version(V1); // Create a `warp` filter that provides access to the network globals. let inner_network_globals = ctx.network_globals.clone(); @@ -323,14 +336,35 @@ pub fn serve( }); // Create a `warp` filter that provides access to the network sender channel. 
- let inner_ctx = ctx.clone(); - let network_tx_filter = warp::any() - .map(move || inner_ctx.network_tx.clone()) - .and_then(|network_tx| async move { - match network_tx { - Some(network_tx) => Ok(network_tx), + let network_tx = ctx + .network_senders + .as_ref() + .map(|senders| senders.network_send()); + let network_tx_filter = + warp::any() + .map(move || network_tx.clone()) + .and_then(|network_tx| async move { + match network_tx { + Some(network_tx) => Ok(network_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started (network_tx).".to_string(), + )), + } + }); + + // Create a `warp` filter that provides access to the network attestation subscription channel. + let validator_subscriptions_tx = ctx + .network_senders + .as_ref() + .map(|senders| senders.validator_subscription_send()); + let validator_subscription_tx_filter = warp::any() + .map(move || validator_subscriptions_tx.clone()) + .and_then(|validator_subscriptions_tx| async move { + match validator_subscriptions_tx { + Some(validator_subscriptions_tx) => Ok(validator_subscriptions_tx), None => Err(warp_utils::reject::custom_not_found( - "The networking stack has not yet started.".to_string(), + "The networking stack has not yet started (validator_subscription_tx)." + .to_string(), )), } }); @@ -358,9 +392,7 @@ pub fn serve( chain: Arc>| async move { match *network_globals.sync_state.read() { SyncState::SyncingFinalized { .. } => { - let head_slot = chain - .best_slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = chain.canonical_head.cached_head().head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { @@ -393,35 +425,6 @@ pub fn serve( ) .untuple_one(); - // Create a `warp` filter that rejects requests unless the head has been verified by the - // execution layer. 
- let only_with_safe_head = warp::any() - .and(chain_filter.clone()) - .and_then(move |chain: Arc>| async move { - let status = chain.head_safety_status().map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to read head safety status: {:?}", - e - )) - })?; - match status { - HeadSafetyStatus::Safe(_) => Ok(()), - HeadSafetyStatus::Unsafe(hash) => { - Err(warp_utils::reject::custom_server_error(format!( - "optimistic head hash {:?} has not been verified by the execution layer", - hash - ))) - } - HeadSafetyStatus::Invalid(hash) => { - Err(warp_utils::reject::custom_server_error(format!( - "the head block has an invalid payload {:?}, this may be unrecoverable", - hash - ))) - } - } - }) - .untuple_one(); - // Create a `warp` filter that provides access to the logger. let inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); @@ -433,22 +436,19 @@ pub fn serve( */ // GET beacon/genesis - let get_beacon_genesis = eth1_v1 + let get_beacon_genesis = eth_v1 .and(warp::path("beacon")) .and(warp::path("genesis")) .and(warp::path::end()) .and(chain_filter.clone()) .and_then(|chain: Arc>| { blocking_json_task(move || { - chain - .head_info() - .map_err(warp_utils::reject::beacon_chain_error) - .map(|head| api_types::GenesisData { - genesis_time: head.genesis_time, - genesis_validators_root: head.genesis_validators_root, - genesis_fork_version: chain.spec.genesis_fork_version, - }) - .map(api_types::GenericResponse::from) + let genesis_data = api_types::GenesisData { + genesis_time: chain.genesis_time, + genesis_validators_root: chain.genesis_validators_root, + genesis_fork_version: chain.spec.genesis_fork_version, + }; + Ok(api_types::GenericResponse::from(genesis_data)) }) }); @@ -456,7 +456,7 @@ pub fn serve( * beacon/states/{state_id} */ - let beacon_states_path = eth1_v1 + let beacon_states_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("states")) .and(warp::path::param::().or_else(|_| async { @@ -473,10 
+473,12 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - state_id - .root(&chain) + let (root, execution_optimistic) = state_id.root(&chain)?; + + Ok(root) .map(api_types::RootData::from) .map(api_types::GenericResponse::from) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) }) }); @@ -486,7 +488,14 @@ pub fn serve( .and(warp::path("fork")) .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { - blocking_json_task(move || state_id.fork(&chain).map(api_types::GenericResponse::from)) + blocking_json_task(move || { + let (fork, execution_optimistic) = + state_id.fork_and_execution_optimistic(&chain)?; + Ok(api_types::ExecutionOptimisticResponse { + data: fork, + execution_optimistic: Some(execution_optimistic), + }) + }) }); // GET beacon/states/{state_id}/finality_checkpoints @@ -496,15 +505,24 @@ pub fn serve( .and(warp::path::end()) .and_then(|state_id: StateId, chain: Arc>| { blocking_json_task(move || { - state_id - .map_state(&chain, |state| { - Ok(api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }) - }) - .map(api_types::GenericResponse::from) + let (data, execution_optimistic) = state_id.map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + Ok(( + api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }); @@ -520,35 +538,45 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - state_id - .map_state(&chain, |state| { - Ok(state - .validators() - 
.iter() - .zip(state.balances().iter()) - .enumerate() - // filter by validator id(s) if provided - .filter(|(index, (validator, _))| { - query.id.as_ref().map_or(true, |ids| { - ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => { - validator.pubkey() == pubkey - } - ValidatorId::Index(param_index) => { - *param_index == *index as u64 - } + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + query.id.as_ref().map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => { + validator.pubkey() == pubkey + } + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) }) - }) - }) - .map(|(index, (_, balance))| { - Some(api_types::ValidatorBalanceData { - index: index as u64, - balance: *balance, - }) - }) - .collect::>()) - }) - .map(api_types::GenericResponse::from) + .map(|(index, (_, balance))| { + Some(api_types::ValidatorBalanceData { + index: index as u64, + balance: *balance, + }) + }) + .collect::>(), + execution_optimistic, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }, ); @@ -565,57 +593,67 @@ pub fn serve( query_res: Result| { blocking_json_task(move || { let query = query_res?; - state_id - .map_state(&chain, |state| { - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; - Ok(state - .validators() - .iter() - .zip(state.balances().iter()) - .enumerate() - // filter by validator id(s) if provided - 
.filter(|(index, (validator, _))| { - query.id.as_ref().map_or(true, |ids| { - ids.iter().any(|id| match id { - ValidatorId::PublicKey(pubkey) => { - validator.pubkey() == pubkey - } - ValidatorId::Index(param_index) => { - *param_index == *index as u64 + Ok(( + state + .validators() + .iter() + .zip(state.balances().iter()) + .enumerate() + // filter by validator id(s) if provided + .filter(|(index, (validator, _))| { + query.id.as_ref().map_or(true, |ids| { + ids.iter().any(|id| match id { + ValidatorId::PublicKey(pubkey) => { + validator.pubkey() == pubkey + } + ValidatorId::Index(param_index) => { + *param_index == *index as u64 + } + }) + }) + }) + // filter by status(es) if provided and map the result + .filter_map(|(index, (validator, balance))| { + let status = api_types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ); + + let status_matches = + query.status.as_ref().map_or(true, |statuses| { + statuses.contains(&status) + || statuses.contains(&status.superstatus()) + }); + + if status_matches { + Some(api_types::ValidatorData { + index: index as u64, + balance: *balance, + status, + validator: validator.clone(), + }) + } else { + None } }) - }) - }) - // filter by status(es) if provided and map the result - .filter_map(|(index, (validator, balance))| { - let status = api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ); + .collect::>(), + execution_optimistic, + )) + }, + )?; - let status_matches = - query.status.as_ref().map_or(true, |statuses| { - statuses.contains(&status) - || statuses.contains(&status.superstatus()) - }); - - if status_matches { - Some(api_types::ValidatorData { - index: index as u64, - balance: *balance, - status, - validator: validator.clone(), - }) - } else { - None - } - }) - .collect::>()) - }) - .map(api_types::GenericResponse::from) + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), + }) }) }, ); @@ 
-633,41 +671,51 @@ pub fn serve( .and_then( |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { blocking_json_task(move || { - state_id - .map_state(&chain, |state| { - let index_opt = match &validator_id { - ValidatorId::PublicKey(pubkey) => { - state.validators().iter().position(|v| v.pubkey() == pubkey) - } - ValidatorId::Index(index) => Some(*index as usize), - }; + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => { + state.validators().iter().position(|v| v.pubkey() == pubkey) + } + ValidatorId::Index(index) => Some(*index as usize), + }; - index_opt - .and_then(|index| { - let validator = state.validators().get(index)?; - let balance = *state.balances().get(index)?; - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; + Ok(( + index_opt + .and_then(|index| { + let validator = state.validators().get(index)?; + let balance = *state.balances().get(index)?; + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; - Some(api_types::ValidatorData { - index: index as u64, - balance, - status: api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ), - validator: validator.clone(), - }) - }) - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "unknown validator: {}", - validator_id - )) - }) - }) - .map(api_types::GenericResponse::from) + Some(api_types::ValidatorData { + index: index as u64, + balance, + status: api_types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "unknown validator: {}", + validator_id + )) + })?, + execution_optimistic, + )) + }, + )?; + + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: 
Some(execution_optimistic), + }) }) }, ); @@ -680,72 +728,99 @@ pub fn serve( .and(warp::path::end()) .and_then( |state_id: StateId, chain: Arc>, query: api_types::CommitteesQuery| { - // the api spec says if the epoch is not present then the epoch of the state should be used - let query_state_id = query.epoch.map_or(state_id, |epoch| { - StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) - }); - blocking_json_task(move || { - query_state_id.map_state(&chain, |state| { - let epoch = state.slot().epoch(T::EthSpec::slots_per_epoch()); + let (data, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); - let committee_cache = if state - .committee_cache_is_initialized(RelativeEpoch::Current) - { - state - .committee_cache(RelativeEpoch::Current) - .map(Cow::Borrowed) - } else { - CommitteeCache::initialized(state, epoch, &chain.spec).map(Cow::Owned) - } - .map_err(BeaconChainError::BeaconStateError) - .map_err(warp_utils::reject::beacon_chain_error)?; + let committee_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) + if state + .committee_cache_is_initialized(relative_epoch) => + { + state.committee_cache(relative_epoch).map(Cow::Borrowed) + } + _ => CommitteeCache::initialized(state, epoch, &chain.spec) + .map(Cow::Owned), + } + .map_err(|e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() as u64; + let first_subsequent_restore_point_slot = ((epoch + .start_slot(T::EthSpec::slots_per_epoch()) + / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request(format!( + "epoch out of bounds, try state at slot {}", + first_subsequent_restore_point_slot, + )) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, too far in future".into(), + ) + 
} + } + _ => warp_utils::reject::beacon_chain_error(e.into()), + })?; - // Use either the supplied slot or all slots in the epoch. - let slots = query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { - epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() - }); + // Use either the supplied slot or all slots in the epoch. + let slots = + query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); - // Use either the supplied committee index or all available indices. - let indices = query.index.map(|index| vec![index]).unwrap_or_else(|| { - (0..committee_cache.committees_per_slot()).collect() - }); + // Use either the supplied committee index or all available indices. + let indices = + query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); - let mut response = Vec::with_capacity(slots.len() * indices.len()); + let mut response = Vec::with_capacity(slots.len() * indices.len()); - for slot in slots { - // It is not acceptable to query with a slot that is not within the - // specified epoch. - if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { - return Err(warp_utils::reject::custom_bad_request(format!( - "{} is not in epoch {}", - slot, epoch - ))); - } + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. 
+ if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request( + format!("{} is not in epoch {}", slot, epoch), + )); + } - for &index in &indices { - let committee = committee_cache - .get_beacon_committee(slot, index) - .ok_or_else(|| { - warp_utils::reject::custom_bad_request(format!( - "committee index {} does not exist in epoch {}", - index, epoch - )) - })?; + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; - response.push(api_types::CommitteeData { - index, - slot, - validators: committee - .committee - .iter() - .map(|i| *i as u64) - .collect(), - }); - } - } + response.push(api_types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } + } - Ok(api_types::GenericResponse::from(response)) + Ok((response, execution_optimistic)) + }, + )?; + Ok(api_types::ExecutionOptimisticResponse { + data, + execution_optimistic: Some(execution_optimistic), }) }) }, @@ -762,28 +837,35 @@ pub fn serve( chain: Arc>, query: api_types::SyncCommitteesQuery| { blocking_json_task(move || { - let sync_committee = state_id.map_state(&chain, |state| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - state - .get_built_sync_committee(epoch, &chain.spec) - .map(|committee| committee.clone()) - .map_err(|e| match e { - BeaconStateError::SyncCommitteeNotKnown { .. 
} => { - warp_utils::reject::custom_bad_request(format!( + let (sync_committee, execution_optimistic) = state_id + .map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + Ok(( + state + .get_built_sync_committee(epoch, &chain.spec) + .map(|committee| committee.clone()) + .map_err(|e| match e { + BeaconStateError::SyncCommitteeNotKnown { .. } => { + warp_utils::reject::custom_bad_request(format!( "state at epoch {} has no sync committee for epoch {}", current_epoch, epoch )) - } - BeaconStateError::IncorrectStateVariant => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} is not activated for Altair", - current_epoch, - )) - } - e => warp_utils::reject::beacon_state_error(e), - }) - })?; + } + BeaconStateError::IncorrectStateVariant => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} is not activated for Altair", + current_epoch, + )) + } + e => warp_utils::reject::beacon_state_error(e), + })?, + execution_optimistic, + )) + }, + )?; let validators = chain .validator_indices(sync_committee.pubkeys.iter()) @@ -801,7 +883,8 @@ pub fn serve( validator_aggregates, }; - Ok(api_types::GenericResponse::from(response)) + Ok(api_types::GenericResponse::from(response) + .add_execution_optimistic(execution_optimistic)) }) }, ); @@ -813,7 +896,7 @@ pub fn serve( // things. Returning non-canonical things is hard for us since we don't already have a // mechanism for arbitrary forwards block iteration, we only support iterating forwards along // the canonical chain. 
- let get_beacon_headers = eth1_v1 + let get_beacon_headers = eth_v1 .and(warp::path("beacon")) .and(warp::path("headers")) .and(warp::query::()) @@ -822,15 +905,24 @@ pub fn serve( .and_then( |query: api_types::HeadersQuery, chain: Arc>| { blocking_json_task(move || { - let (root, block) = match (query.slot, query.parent_root) { + let (root, block, execution_optimistic) = match (query.slot, query.parent_root) + { // No query parameters, return the canonical head block. - (None, None) => chain - .head_beacon_block() - .map_err(warp_utils::reject::beacon_chain_error) - .map(|block| (block.canonical_root(), block.into()))?, + (None, None) => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + ( + cached_head.head_block_root(), + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + ) + } // Only the parent root parameter, do a forwards-iterator lookup. (None, Some(parent_root)) => { - let parent = BlockId::from_root(parent_root).blinded_block(&chain)?; + let (parent, execution_optimistic) = + BlockId::from_root(parent_root).blinded_block(&chain)?; let (root, _slot) = chain .forwards_iter_block_roots(parent.slot()) .map_err(warp_utils::reject::beacon_chain_error)? @@ -849,13 +941,21 @@ pub fn serve( BlockId::from_root(root) .blinded_block(&chain) - .map(|block| (root, block))? + // Ignore this `execution_optimistic` since the first value has + // more information about the original request. + .map(|(block, _execution_optimistic)| { + (root, block, execution_optimistic) + })? } // Slot is supplied, search by slot and optionally filter by // parent root. 
(Some(slot), parent_root_opt) => { - let root = BlockId::from_slot(slot).root(&chain)?; - let block = BlockId::from_root(root).blinded_block(&chain)?; + let (root, execution_optimistic) = + BlockId::from_slot(slot).root(&chain)?; + // Ignore the second `execution_optimistic`, the first one is the + // most relevant since it knows that we queried by slot. + let (block, _execution_optimistic) = + BlockId::from_root(root).blinded_block(&chain)?; // If the parent root was supplied, check that it matches the block // obtained via a slot lookup. @@ -868,7 +968,7 @@ pub fn serve( } } - (root, block) + (root, block, execution_optimistic) } }; @@ -881,13 +981,14 @@ pub fn serve( }, }; - Ok(api_types::GenericResponse::from(vec![data])) + Ok(api_types::GenericResponse::from(vec![data]) + .add_execution_optimistic(execution_optimistic)) }) }, ); // GET beacon/headers/{block_id} - let get_beacon_headers_block_id = eth1_v1 + let get_beacon_headers_block_id = eth_v1 .and(warp::path("beacon")) .and(warp::path("headers")) .and(warp::path::param::().or_else(|_| async { @@ -899,8 +1000,11 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - let root = block_id.root(&chain)?; - let block = BlockId::from_root(root).blinded_block(&chain)?; + let (root, execution_optimistic) = block_id.root(&chain)?; + // Ignore the second `execution_optimistic` since the first one has more + // information about the original request. 
+ let (block, _execution_optimistic) = + BlockId::from_root(root).blinded_block(&chain)?; let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) @@ -916,7 +1020,10 @@ pub fn serve( }, }; - Ok(api_types::GenericResponse::from(data)) + Ok(api_types::ExecutionOptimisticResponse { + execution_optimistic: Some(execution_optimistic), + data, + }) }) }); @@ -925,7 +1032,7 @@ pub fn serve( */ // POST beacon/blocks - let post_beacon_blocks = eth1_v1 + let post_beacon_blocks = eth_v1 .and(warp::path("beacon")) .and(warp::path("blocks")) .and(warp::path::end()) @@ -934,93 +1041,13 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |block: SignedBeaconBlock, + |block: Arc>, chain: Arc>, network_tx: UnboundedSender>, - log: Logger| { - blocking_json_task(move || { - let seen_timestamp = timestamp_now(); - - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(Box::new(block.clone())), - )?; - - // Determine the delay after the start of the slot, register it with metrics. - let delay = - get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - metrics::observe_duration( - &metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, - delay, - ); - - match chain.process_block(block.clone()) { - Ok(root) => { - info!( - log, - "Valid block from HTTP API"; - "block_delay" => ?delay, - "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), - "slot" => block.slot(), - ); - - // Notify the validator monitor. - chain.validator_monitor.read().register_api_block( - seen_timestamp, - block.message(), - root, - &chain.slot_clock, - ); - - // Update the head since it's likely this block will become the new - // head. 
- chain - .fork_choice() - .map_err(warp_utils::reject::beacon_chain_error)?; - - // Perform some logging to inform users if their blocks are being produced - // late. - // - // Check to see the thresholds are non-zero to avoid logging errors with small - // slot times (e.g., during testing) - let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let error_threshold = crit_threshold / 2; - if delay >= crit_threshold { - crit!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } else if delay >= error_threshold { - error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } - - Ok(()) - } - Err(e) => { - let msg = format!("{:?}", e); - error!( - log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) - } - } - }) + log: Logger| async move { + publish_blocks::publish_block(block, chain, &network_tx, log) + .await + .map(|()| warp::reply()) }, ); @@ -1029,7 +1056,7 @@ pub fn serve( */ // POST beacon/blocks - let post_beacon_blinded_blocks = eth1_v1 + let post_beacon_blinded_blocks = eth_v1 .and(warp::path("beacon")) .and(warp::path("blinded_blocks")) .and(warp::path::end()) @@ -1041,96 +1068,10 @@ pub fn serve( |block: SignedBeaconBlock>, chain: Arc>, network_tx: UnboundedSender>, - _log: Logger| { - blocking_json_task(move || { - if let Some(el) = chain.execution_layer.as_ref() { - //FIXME(sean): we may not always receive the payload in this response because it - // should be the relay's job to propogate the block. However, since this block is - // already signed and sent this might be ok (so long as the relay validates - // the block before revealing the payload). 
- - //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should - // be able to support the normal block proposal flow, because at some point full block endpoints - // will be deprecated from the beacon API. This will entail creating full blocks in - // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded - // blocks. We will access the payload of those blocks here. This flow should happen if the - // execution layer has no payload builders or if we have not yet finalized post-merge transition. - let payload = el - .block_on(|el| el.propose_blinded_beacon_block(&block)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "proposal failed: {:?}", - e - )) - })?; - let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { - message: BeaconBlockMerge { - slot: block.message().slot(), - proposer_index: block.message().proposer_index(), - parent_root: block.message().parent_root(), - state_root: block.message().state_root(), - body: BeaconBlockBodyMerge { - randao_reveal: block.message().body().randao_reveal().clone(), - eth1_data: block.message().body().eth1_data().clone(), - graffiti: *block.message().body().graffiti(), - proposer_slashings: block - .message() - .body() - .proposer_slashings() - .clone(), - attester_slashings: block - .message() - .body() - .attester_slashings() - .clone(), - attestations: block.message().body().attestations().clone(), - deposits: block.message().body().deposits().clone(), - voluntary_exits: block - .message() - .body() - .voluntary_exits() - .clone(), - sync_aggregate: block - .message() - .body() - .sync_aggregate() - .unwrap() - .clone(), - execution_payload: payload.into(), - }, - }, - signature: block.signature().clone(), - }); - - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. 
- publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(Box::new(new_block.clone())), - )?; - - match chain.process_block(new_block) { - Ok(_) => { - // Update the head since it's likely this block will become the new - // head. - chain - .fork_choice() - .map_err(warp_utils::reject::beacon_chain_error)?; - - Ok(()) - } - Err(e) => { - let msg = format!("{:?}", e); - - Err(warp_utils::reject::broadcast_without_import(msg)) - } - } - } else { - Err(warp_utils::reject::custom_server_error( - "no execution layer found".to_string(), - )) - } - }) + log: Logger| async move { + publish_blocks::publish_blinded_block(block, chain, &network_tx, log) + .await + .map(|()| warp::reply()) }, ); @@ -1140,7 +1081,7 @@ pub fn serve( )) }); - let beacon_blocks_path_v1 = eth1_v1 + let beacon_blocks_path_v1 = eth_v1 .and(warp::path("beacon")) .and(warp::path("blocks")) .and(block_id_or_err) @@ -1163,10 +1104,11 @@ pub fn serve( chain: Arc>, accept_header: Option| { async move { - let block = block_id.full_block(&chain).await?; + let (block, execution_optimistic) = block_id.full_block(&chain).await?; let fork_name = block .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; + match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) @@ -1178,8 +1120,13 @@ pub fn serve( e )) }), - _ => fork_versioned_response(endpoint_version, fork_name, block) - .map(|res| warp::reply::json(&res).into_response()), + _ => execution_optimistic_fork_versioned_response( + endpoint_version, + fork_name, + execution_optimistic, + block, + ) + .map(|res| warp::reply::json(&res).into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) } @@ -1193,10 +1140,12 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - block_id - .root(&chain) - .map(api_types::RootData::from) - .map(api_types::GenericResponse::from) + let (block, execution_optimistic) = 
block_id.blinded_block(&chain)?; + + Ok(api_types::GenericResponse::from(api_types::RootData::from( + block.canonical_root(), + )) + .add_execution_optimistic(execution_optimistic)) }) }); @@ -1207,10 +1156,12 @@ pub fn serve( .and(warp::path::end()) .and_then(|block_id: BlockId, chain: Arc>| { blocking_json_task(move || { - block_id - .blinded_block(&chain) - .map(|block| block.message().body().attestations().clone()) - .map(api_types::GenericResponse::from) + let (block, execution_optimistic) = block_id.blinded_block(&chain)?; + + Ok( + api_types::GenericResponse::from(block.message().body().attestations().clone()) + .add_execution_optimistic(execution_optimistic), + ) }) }); @@ -1218,7 +1169,7 @@ pub fn serve( * beacon/pool */ - let beacon_pool_path = eth1_v1 + let beacon_pool_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("pool")) .and(chain_filter.clone()); @@ -1239,12 +1190,46 @@ pub fn serve( blocking_json_task(move || { let seen_timestamp = timestamp_now(); let mut failures = Vec::new(); + let mut num_already_known = 0; for (index, attestation) in attestations.as_slice().iter().enumerate() { let attestation = match chain .verify_unaggregated_attestation_for_gossip(attestation, None) { Ok(attestation) => attestation, + Err(AttnError::PriorAttestationKnown { .. }) => { + num_already_known += 1; + + // Skip to the next attestation since an attestation for this + // validator is already known in this epoch. + // + // There's little value for the network in validating a second + // attestation for another validator since it is either: + // + // 1. A duplicate. + // 2. Slashable. + // 3. Invalid. + // + // We are likely to get duplicates in the case where a VC is using + // fallback BNs. If the first BN actually publishes some/all of a + // batch of attestations but fails to respond in a timely fashion, + // the VC is likely to try publishing the attestations on another + // BN. 
That second BN may have already seen the attestations from + // the first BN and therefore indicate that the attestations are + // "already seen". An attestation that has already been seen has + // been published on the network so there's no actual error from + // the perspective of the user. + // + // It's better to prevent slashable attestations from ever + // appearing on the network than trying to slash validators, + // especially those validators connected to the local API. + // + // There might be *some* value in determining that this attestation + // is invalid, but since a valid attestation already exists it + // appears that this validator is capable of producing valid + // attestations and there's no immediate cause for concern. + continue; + } Err(e) => { error!(log, "Failure verifying attestation for gossip"; @@ -1311,6 +1296,15 @@ pub fn serve( )); } } + + if num_already_known > 0 { + debug!( + log, + "Some unagg attestations already known"; + "count" => num_already_known + ); + } + if failures.is_empty() { Ok(()) } else { @@ -1332,13 +1326,11 @@ pub fn serve( .and_then( |chain: Arc>, query: api_types::AttestationPoolQuery| { blocking_json_task(move || { - let query_filter = |attestation: &Attestation| { - query - .slot - .map_or(true, |slot| slot == attestation.data.slot) + let query_filter = |data: &AttestationData| { + query.slot.map_or(true, |slot| slot == data.slot) && query .committee_index - .map_or(true, |index| index == attestation.data.index) + .map_or(true, |index| index == data.index) }; let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); @@ -1348,7 +1340,7 @@ pub fn serve( .read() .iter() .cloned() - .filter(query_filter), + .filter(|att| query_filter(&att.data)), ); Ok(api_types::GenericResponse::from(attestations)) }) @@ -1390,9 +1382,7 @@ pub fn serve( )), )?; - chain - .import_attester_slashing(slashing) - .map_err(warp_utils::reject::beacon_chain_error)?; + chain.import_attester_slashing(slashing); }
Ok(()) @@ -1546,7 +1536,7 @@ pub fn serve( * config */ - let config_path = eth1_v1.and(warp::path("config")); + let config_path = eth_v1.and(warp::path("config")); // GET config/fork_schedule let get_config_fork_schedule = config_path @@ -1564,18 +1554,15 @@ pub fn serve( }); // GET config/spec - let serve_legacy_spec = ctx.config.serve_legacy_spec; + let spec_fork_name = ctx.config.spec_fork_name; let get_config_spec = config_path .and(warp::path("spec")) .and(warp::path::end()) .and(chain_filter.clone()) .and_then(move |chain: Arc>| { blocking_json_task(move || { - let mut config_and_preset = - ConfigAndPreset::from_chain_spec::(&chain.spec); - if serve_legacy_spec { - config_and_preset.make_backwards_compat(&chain.spec); - } + let config_and_preset = + ConfigAndPreset::from_chain_spec::(&chain.spec, spec_fork_name); Ok(api_types::GenericResponse::from(config_and_preset)) }) }); @@ -1620,7 +1607,10 @@ pub fn serve( chain: Arc>| { blocking_task(move || match accept_header { Some(api_types::Accept::Ssz) => { - let state = state_id.state(&chain)?; + // We can ignore the optimistic status for the "fork" since it's a + // specification constant that doesn't change across competing heads of the + // beacon chain. 
+ let (state, _execution_optimistic) = state_id.state(&chain)?; let fork_name = state .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; @@ -1636,44 +1626,71 @@ pub fn serve( )) }) } - _ => state_id.map_state(&chain, |state| { - let fork_name = state - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - let res = fork_versioned_response(endpoint_version, fork_name, &state)?; - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }), + _ => state_id.map_state_and_execution_optimistic( + &chain, + |state, execution_optimistic| { + let fork_name = state + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let res = execution_optimistic_fork_versioned_response( + endpoint_version, + fork_name, + execution_optimistic, + &state, + )?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }, + ), }) }, ); // GET debug/beacon/heads - let get_debug_beacon_heads = eth1_v1 + let get_debug_beacon_heads = any_version .and(warp::path("debug")) .and(warp::path("beacon")) .and(warp::path("heads")) .and(warp::path::end()) .and(chain_filter.clone()) - .and_then(|chain: Arc>| { - blocking_json_task(move || { - let heads = chain - .heads() - .into_iter() - .map(|(root, slot)| api_types::ChainHeadData { slot, root }) - .collect::>(); - Ok(api_types::GenericResponse::from(heads)) - }) - }); + .and_then( + |endpoint_version: EndpointVersion, chain: Arc>| { + blocking_json_task(move || { + let heads = chain + .heads() + .into_iter() + .map(|(root, slot)| { + let execution_optimistic = if endpoint_version == V1 { + None + } else if endpoint_version == V2 { + chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&root) + .ok() + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + Ok(api_types::ChainHeadData { + slot, + root, + execution_optimistic, + }) + }) + .collect::, warp::Rejection>>(); + 
Ok(api_types::GenericResponse::from(heads?)) + }) + }, + ); /* * node */ // GET node/identity - let get_node_identity = eth1_v1 + let get_node_identity = eth_v1 .and(warp::path("node")) .and(warp::path("identity")) .and(warp::path::end()) @@ -1711,7 +1728,7 @@ pub fn serve( }); // GET node/version - let get_node_version = eth1_v1 + let get_node_version = eth_v1 .and(warp::path("node")) .and(warp::path("version")) .and(warp::path::end()) @@ -1724,7 +1741,7 @@ pub fn serve( }); // GET node/syncing - let get_node_syncing = eth1_v1 + let get_node_syncing = eth_v1 .and(warp::path("node")) .and(warp::path("syncing")) .and(warp::path::end()) @@ -1733,19 +1750,21 @@ pub fn serve( .and_then( |network_globals: Arc>, chain: Arc>| { blocking_json_task(move || { - let head_slot = chain - .head_info() - .map(|info| info.slot) - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_slot = chain - .slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = chain.canonical_head.cached_head().head_slot(); + let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to read slot clock".into()) + })?; // Taking advantage of saturating subtraction on slot. 
let sync_distance = current_slot - head_slot; + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + let syncing_data = api_types::SyncingData { is_syncing: network_globals.sync_state.read().is_syncing(), + is_optimistic: Some(is_optimistic), head_slot, sync_distance, }; @@ -1756,7 +1775,7 @@ pub fn serve( ); // GET node/health - let get_node_health = eth1_v1 + let get_node_health = eth_v1 .and(warp::path("node")) .and(warp::path("health")) .and(warp::path::end()) @@ -1781,7 +1800,7 @@ pub fn serve( }); // GET node/peers/{peer_id} - let get_node_peers_by_id = eth1_v1 + let get_node_peers_by_id = eth_v1 .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::param::()) @@ -1838,7 +1857,7 @@ pub fn serve( ); // GET node/peers - let get_node_peers = eth1_v1 + let get_node_peers = eth_v1 .and(warp::path("node")) .and(warp::path("peers")) .and(warp::path::end()) @@ -1907,7 +1926,7 @@ pub fn serve( ); // GET node/peer_count - let get_node_peer_count = eth1_v1 + let get_node_peer_count = eth_v1 .and(warp::path("node")) .and(warp::path("peer_count")) .and(warp::path::end()) @@ -1948,7 +1967,7 @@ pub fn serve( */ // GET validator/duties/proposer/{epoch} - let get_validator_duties_proposer = eth1_v1 + let get_validator_duties_proposer = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("proposer")) @@ -1982,53 +2001,54 @@ pub fn serve( |endpoint_version: EndpointVersion, slot: Slot, query: api_types::ValidatorBlocksQuery, - chain: Arc>| { - blocking_json_task(move || { - let randao_reveal = query.randao_reveal.as_ref().map_or_else( - || { - if query.verify_randao { - Err(warp_utils::reject::custom_bad_request( - "randao_reveal is mandatory unless verify_randao=false".into(), - )) - } else { - Ok(Signature::empty()) - } - }, - |sig_bytes| { - sig_bytes.try_into().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: 
{:?}", - e - )) - }) - }, - )?; + chain: Arc>| async move { + let randao_reveal = query.randao_reveal.as_ref().map_or_else( + || { + if query.verify_randao { + Err(warp_utils::reject::custom_bad_request( + "randao_reveal is mandatory unless verify_randao=false".into(), + )) + } else { + Ok(Signature::empty()) + } + }, + |sig_bytes| { + sig_bytes.try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + }) + }, + )?; - let randao_verification = if query.verify_randao { - ProduceBlockVerification::VerifyRandao - } else { - ProduceBlockVerification::NoVerification - }; + let randao_verification = if query.verify_randao { + ProduceBlockVerification::VerifyRandao + } else { + ProduceBlockVerification::NoVerification + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) - }) + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + + fork_versioned_response(endpoint_version, fork_name, block) + .map(|response| warp::reply::json(&response)) }, ); // GET validator/blinded_blocks/{slot} - let get_validator_blinded_blocks = any_version + let get_validator_blinded_blocks = eth_v1 .and(warp::path("validator")) .and(warp::path("blinded_blocks")) .and(warp::path::param::().or_else(|_| async { @@ -2041,62 +2061,61 @@ pub fn serve( .and(warp::query::()) .and(chain_filter.clone()) .and_then( - |endpoint_version: 
EndpointVersion, - slot: Slot, + |slot: Slot, query: api_types::ValidatorBlocksQuery, - chain: Arc>| { - blocking_json_task(move || { - let randao_reveal = query.randao_reveal.as_ref().map_or_else( - || { - if query.verify_randao { - Err(warp_utils::reject::custom_bad_request( - "randao_reveal is mandatory unless verify_randao=false".into(), - )) - } else { - Ok(Signature::empty()) - } - }, - |sig_bytes| { - sig_bytes.try_into().map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "randao reveal is not a valid BLS signature: {:?}", - e - )) - }) - }, - )?; + chain: Arc>| async move { + let randao_reveal = query.randao_reveal.as_ref().map_or_else( + || { + if query.verify_randao { + Err(warp_utils::reject::custom_bad_request( + "randao_reveal is mandatory unless verify_randao=false".into(), + )) + } else { + Ok(Signature::empty()) + } + }, + |sig_bytes| { + sig_bytes.try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + }) + }, + )?; - let randao_verification = if query.verify_randao { - ProduceBlockVerification::VerifyRandao - } else { - ProduceBlockVerification::NoVerification - }; + let randao_verification = if query.verify_randao { + ProduceBlockVerification::VerifyRandao + } else { + ProduceBlockVerification::NoVerification + }; - let (block, _) = chain - .produce_block_with_verification::>( - randao_reveal, - slot, - query.graffiti.map(Into::into), - randao_verification, - ) - .map_err(warp_utils::reject::block_production_error)?; - let fork_name = block - .to_ref() - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - fork_versioned_response(endpoint_version, fork_name, block) - }) + let (block, _) = chain + .produce_block_with_verification::>( + randao_reveal, + slot, + query.graffiti.map(Into::into), + randao_verification, + ) + .await + .map_err(warp_utils::reject::block_production_error)?; + let fork_name = block + .to_ref() + 
.fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + // Pose as a V2 endpoint so we return the fork `version`. + fork_versioned_response(V2, fork_name, block) + .map(|response| warp::reply::json(&response)) }, ); // GET validator/attestation_data?slot,committee_index - let get_validator_attestation_data = eth1_v1 + let get_validator_attestation_data = eth_v1 .and(warp::path("validator")) .and(warp::path("attestation_data")) .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAttestationDataQuery, chain: Arc>| { @@ -2123,13 +2142,12 @@ pub fn serve( ); // GET validator/aggregate_attestation?attestation_data_root,slot - let get_validator_aggregate_attestation = eth1_v1 + let get_validator_aggregate_attestation = eth_v1 .and(warp::path("validator")) .and(warp::path("aggregate_attestation")) .and(warp::path::end()) .and(warp::query::()) .and(not_while_syncing_filter.clone()) - .and(only_with_safe_head.clone()) .and(chain_filter.clone()) .and_then( |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc>| { @@ -2156,7 +2174,7 @@ pub fn serve( ); // POST validator/duties/attester/{epoch} - let post_validator_duties_attester = eth1_v1 + let post_validator_duties_attester = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("attester")) @@ -2178,7 +2196,7 @@ pub fn serve( ); // POST validator/duties/sync - let post_validator_duties_sync = eth1_v1 + let post_validator_duties_sync = eth_v1 .and(warp::path("validator")) .and(warp::path("duties")) .and(warp::path("sync")) @@ -2200,19 +2218,24 @@ pub fn serve( ); // GET validator/sync_committee_contribution - let get_validator_sync_committee_contribution = eth1_v1 + let get_validator_sync_committee_contribution = eth_v1 .and(warp::path("validator")) .and(warp::path("sync_committee_contribution")) .and(warp::path::end()) .and(warp::query::()) 
.and(not_while_syncing_filter.clone()) - .and(only_with_safe_head) .and(chain_filter.clone()) .and_then( |sync_committee_data: SyncContributionData, chain: Arc>| { blocking_json_task(move || { chain .get_aggregated_sync_committee_contribution(&sync_committee_data) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch sync contribution: {:?}", + e + )) + })? .map(api_types::GenericResponse::from) .ok_or_else(|| { warp_utils::reject::custom_not_found( @@ -2224,7 +2247,7 @@ pub fn serve( ); // POST validator/aggregate_and_proofs - let post_validator_aggregate_and_proofs = eth1_v1 + let post_validator_aggregate_and_proofs = eth_v1 .and(warp::path("validator")) .and(warp::path("aggregate_and_proofs")) .and(warp::path::end()) @@ -2271,6 +2294,16 @@ pub fn serve( // identical aggregates, especially if they're using the same beacon // node. Err(AttnError::AttestationAlreadyKnown(_)) => continue, + // If we've already seen this aggregator produce an aggregate, just + // skip this one. + // + // We're likely to see this with VCs that use fallback BNs. The first + // BN might time-out *after* publishing the aggregate and then the + // second BN will indicate it's already seen the aggregate. + // + // There's no actual error for the user or the network since the + // aggregate has been successfully published by some other node. 
+ Err(AttnError::AggregatorAlreadyKnown(_)) => continue, Err(e) => { error!(log, "Failure verifying aggregate and proofs"; @@ -2303,12 +2336,13 @@ pub fn serve( ); failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e))); } - if let Err(e) = chain.add_to_block_inclusion_pool(&verified_aggregate) { - warn!(log, - "Could not add verified aggregate attestation to the inclusion pool"; - "error" => format!("{:?}", e), - "request_index" => index, - ); + if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) { + warn!( + log, + "Could not add verified aggregate attestation to the inclusion pool"; + "error" => ?e, + "request_index" => index, + ); failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e))); } } @@ -2324,14 +2358,14 @@ pub fn serve( }, ); - let post_validator_contribution_and_proofs = eth1_v1 + let post_validator_contribution_and_proofs = eth_v1 .and(warp::path("validator")) .and(warp::path("contribution_and_proofs")) .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(chain_filter.clone()) .and(warp::body::json()) - .and(network_tx_filter.clone()) + .and(network_tx_filter) .and(log_filter.clone()) .and_then( |chain: Arc>, @@ -2351,17 +2385,19 @@ pub fn serve( ); // POST validator/beacon_committee_subscriptions - let post_validator_beacon_committee_subscriptions = eth1_v1 + let post_validator_beacon_committee_subscriptions = eth_v1 .and(warp::path("validator")) .and(warp::path("beacon_committee_subscriptions")) .and(warp::path::end()) .and(warp::body::json()) - .and(network_tx_filter.clone()) + .and(validator_subscription_tx_filter.clone()) .and(chain_filter.clone()) + .and(log_filter.clone()) .and_then( |subscriptions: Vec, - network_tx: UnboundedSender>, - chain: Arc>| { + validator_subscription_tx: Sender, + chain: Arc>, + log: Logger| { blocking_json_task(move || { for subscription in &subscriptions { chain @@ -2369,7 +2405,7 @@ pub fn serve( .write() 
.auto_register_local_validator(subscription.validator_index); - let subscription = api_types::ValidatorSubscription { + let validator_subscription = api_types::ValidatorSubscription { validator_index: subscription.validator_index, attestation_committee_index: subscription.committee_index, slot: subscription.slot, @@ -2377,12 +2413,20 @@ pub fn serve( is_aggregator: subscription.is_aggregator, }; - publish_network_message( - &network_tx, - NetworkMessage::AttestationSubscribe { - subscriptions: vec![subscription], - }, - )?; + let message = ValidatorSubscriptionMessage::AttestationSubscribe { + subscriptions: vec![validator_subscription], + }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + log, + "Unable to process committee subscriptions"; + "info" => "the host may be overloaded or resource-constrained", + "error" => ?e, + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or shutting down".to_string(), + )); + } } Ok(()) @@ -2391,71 +2435,191 @@ pub fn serve( ); // POST validator/prepare_beacon_proposer - let post_validator_prepare_beacon_proposer = eth1_v1 + let post_validator_prepare_beacon_proposer = eth_v1 .and(warp::path("validator")) .and(warp::path("prepare_beacon_proposer")) .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(chain_filter.clone()) - .and(warp::addr::remote()) .and(log_filter.clone()) .and(warp::body::json()) .and_then( |chain: Arc>, - client_addr: Option, log: Logger, - preparation_data: Vec| { - blocking_json_task(move || { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + preparation_data: Vec| async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + 
.map_err(warp_utils::reject::beacon_chain_error)?; - debug!( - log, - "Received proposer preparation data"; - "count" => preparation_data.len(), - "client" => client_addr - .map(|a| a.to_string()) - .unwrap_or_else(|| "unknown".to_string()), - ); + let current_slot = chain + .slot() + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - execution_layer - .update_proposer_preparation_blocking(current_epoch, &preparation_data) - .map_err(|_e| { - warp_utils::reject::custom_bad_request( - "error processing proposer preparations".to_string(), - ) - })?; + debug!( + log, + "Received proposer preparation data"; + "count" => preparation_data.len(), + ); - chain.prepare_beacon_proposer_blocking().map_err(|e| { + execution_layer + .update_proposer_preparation(current_epoch, &preparation_data) + .await; + + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { warp_utils::reject::custom_bad_request(format!( "error updating proposer preparations: {:?}", e )) })?; - Ok(()) - }) + Ok::<_, warp::reject::Rejection>(warp::reply::json(&())) }, ); + // POST validator/register_validator + let post_validator_register_validator = eth_v1 + .and(warp::path("validator")) + .and(warp::path("register_validator")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(log_filter.clone()) + .and(warp::body::json()) + .and_then( + |chain: Arc>, + log: Logger, + register_val_data: Vec| async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + debug!( + log, + "Received register validator request"; + "count" => register_val_data.len(), + ); + + let 
head_snapshot = chain.head_snapshot(); + let spec = &chain.spec; + + let (preparation_data, filtered_registration_data): ( + Vec, + Vec, + ) = register_val_data + .into_iter() + .filter_map(|register_data| { + chain + .validator_index(®ister_data.message.pubkey) + .ok() + .flatten() + .and_then(|validator_index| { + let validator = head_snapshot + .beacon_state + .get_validator(validator_index) + .ok()?; + let validator_status = ValidatorStatus::from_validator( + validator, + current_epoch, + spec.far_future_epoch, + ) + .superstatus(); + let is_active_or_pending = + matches!(validator_status, ValidatorStatus::Pending) + || matches!(validator_status, ValidatorStatus::Active); + + // Filter out validators who are not 'active' or 'pending'. + is_active_or_pending.then(|| { + ( + ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data.message.fee_recipient, + }, + register_data, + ) + }) + }) + }) + .unzip(); + + // Update the prepare beacon proposer cache based on this request. + execution_layer + .update_proposer_preparation(current_epoch, &preparation_data) + .await; + + // Call prepare beacon proposer blocking with the latest update in order to make + // sure we have a local payload to fall back to in the event of the blinded block + // flow failing. 
+ chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + let builder = execution_layer + .builder() + .as_ref() + .ok_or(BeaconChainError::BuilderMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; + + info!( + log, + "Forwarding register validator request to connected builder"; + "count" => filtered_registration_data.len(), + ); + + builder + .post_builder_validators(&filtered_registration_data) + .await + .map(|resp| warp::reply::json(&resp)) + .map_err(|e| { + error!(log, "Error from connected relay"; "error" => ?e); + // Forward the HTTP status code if we are able to, otherwise fall back + // to a server error. + if let eth2::Error::ServerMessage(message) = e { + if message.code == StatusCode::BAD_REQUEST.as_u16() { + return warp_utils::reject::custom_bad_request(message.message); + } else { + // According to the spec this response should only be a 400 or 500, + // so we fall back to a 500 here. 
+ return warp_utils::reject::custom_server_error(message.message); + } + } + warp_utils::reject::custom_server_error(format!("{e:?}")) + }) + }, + ); // POST validator/sync_committee_subscriptions - let post_validator_sync_committee_subscriptions = eth1_v1 + let post_validator_sync_committee_subscriptions = eth_v1 .and(warp::path("validator")) .and(warp::path("sync_committee_subscriptions")) .and(warp::path::end()) .and(warp::body::json()) - .and(network_tx_filter) + .and(validator_subscription_tx_filter) .and(chain_filter.clone()) + .and(log_filter.clone()) .and_then( |subscriptions: Vec, - network_tx: UnboundedSender>, - chain: Arc>| { + validator_subscription_tx: Sender, + chain: Arc>, + log: Logger + | { blocking_json_task(move || { for subscription in subscriptions { chain @@ -2463,12 +2627,20 @@ pub fn serve( .write() .auto_register_local_validator(subscription.validator_index); - publish_network_message( - &network_tx, - NetworkMessage::SyncCommitteeSubscribe { + let message = ValidatorSubscriptionMessage::SyncCommitteeSubscribe { subscriptions: vec![subscription], - }, - )?; + }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + log, + "Unable to process sync subscriptions"; + "info" => "the host may be overloaded or resource-constrained", + "error" => ?e + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or shutting down".to_string(), + )); + } } Ok(()) @@ -2607,7 +2779,11 @@ pub fn serve( .and_then(|chain: Arc>| { blocking_task(move || { Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from( - chain.fork_choice.read().proto_array().core_proto_array(), + chain + .canonical_head + .fork_choice_read_lock() + .proto_array() + .core_proto_array(), ))) }) }); @@ -2650,9 +2826,6 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|chain: Arc>| { blocking_json_task(move || { - let head_info = chain - .head_info() - 
.map_err(warp_utils::reject::beacon_chain_error)?; let current_slot_opt = chain.slot().ok(); chain @@ -2664,7 +2837,7 @@ pub fn serve( ) }) .and_then(|eth1| { - eth1.sync_status(head_info.genesis_time, current_slot_opt, &chain.spec) + eth1.sync_status(chain.genesis_time, current_slot_opt, &chain.spec) .ok_or_else(|| { warp_utils::reject::custom_server_error( "Unable to determine Eth1 sync status".to_string(), @@ -2724,7 +2897,8 @@ pub fn serve( .and(chain_filter.clone()) .and_then(|state_id: StateId, chain: Arc>| { blocking_task(move || { - let state = state_id.state(&chain)?; + // This debug endpoint provides no indication of optimistic status. + let (state, _execution_optimistic) = state_id.state(&chain)?; Response::builder() .status(200) .header("Content-Type", "application/ssz") @@ -2787,7 +2961,7 @@ pub fn serve( .and(chain_filter.clone()) .and(log_filter.clone()) .and_then( - |blocks: Vec>, + |blocks: Vec>>, chain: Arc>, log: Logger| { info!( @@ -2812,6 +2986,18 @@ pub fn serve( blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log)) }); + // POST lighthouse/analysis/block_rewards + let post_lighthouse_block_rewards = warp::path("lighthouse") + .and(warp::path("analysis")) + .and(warp::path("block_rewards")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and(log_filter.clone()) + .and_then(|blocks, chain, log| { + blocking_json_task(move || block_rewards::compute_block_rewards(blocks, chain, log)) + }); + // GET lighthouse/analysis/attestation_performance/{index} let get_lighthouse_attestation_performance = warp::path("lighthouse") .and(warp::path("analysis")) @@ -2839,7 +3025,19 @@ pub fn serve( }) }); - let get_events = eth1_v1 + // GET lighthouse/merge_readiness + let get_lighthouse_merge_readiness = warp::path("lighthouse") + .and(warp::path("merge_readiness")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| async move { + let merge_readiness = 
chain.check_merge_readiness().await; + Ok::<_, warp::reject::Rejection>(warp::reply::json(&api_types::GenericResponse::from( + merge_readiness, + ))) + }); + + let get_events = eth_v1 .and(warp::path("events")) .and(warp::path::end()) .and(multi_key_query::()) @@ -2967,6 +3165,7 @@ pub fn serve( .or(get_lighthouse_block_rewards.boxed()) .or(get_lighthouse_attestation_performance.boxed()) .or(get_lighthouse_block_packing_efficiency.boxed()) + .or(get_lighthouse_merge_readiness.boxed()) .or(get_events.boxed()), ) .or(warp::post().and( @@ -2985,9 +3184,11 @@ pub fn serve( .or(post_validator_beacon_committee_subscriptions.boxed()) .or(post_validator_sync_committee_subscriptions.boxed()) .or(post_validator_prepare_beacon_proposer.boxed()) + .or(post_validator_register_validator.boxed()) .or(post_lighthouse_liveness.boxed()) .or(post_lighthouse_database_reconstruct.boxed()) - .or(post_lighthouse_database_historical_blocks.boxed()), + .or(post_lighthouse_database_historical_blocks.boxed()) + .or(post_lighthouse_block_rewards.boxed()), )) .recover(warp_utils::reject::handle_rejection) .with(slog_logging(log.clone())) diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index c094e90e04..cf9bff67fb 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -55,10 +55,16 @@ pub fn proposer_duties( .safe_add(1) .map_err(warp_utils::reject::arith_error)? 
{ - let (proposers, dependent_root, _) = + let (proposers, dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(chain, request_epoch, dependent_root, proposers) + convert_to_api_response( + chain, + request_epoch, + dependent_root, + execution_status.is_optimistic_or_invalid(), + proposers, + ) } else if request_epoch > current_epoch .safe_add(1) @@ -88,16 +94,24 @@ fn try_proposer_duties_from_cache( request_epoch: Epoch, chain: &BeaconChain, ) -> Result, warp::reject::Rejection> { - let head = chain - .head_info() + let head = chain.canonical_head.cached_head(); + let head_block = &head.snapshot.beacon_block; + let head_block_root = head.head_block_root(); + let head_decision_root = head + .snapshot + .beacon_state + .proposer_shuffling_decision_root(head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; + let head_epoch = head_block.slot().epoch(T::EthSpec::slots_per_epoch()); + let execution_optimistic = chain + .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::beacon_chain_error)?; - let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch()); let dependent_root = match head_epoch.cmp(&request_epoch) { // head_epoch == request_epoch - Ordering::Equal => head.proposer_shuffling_decision_root, + Ordering::Equal => head_decision_root, // head_epoch < request_epoch - Ordering::Less => head.block_root, + Ordering::Less => head_block_root, // head_epoch > request_epoch Ordering::Greater => { return Err(warp_utils::reject::custom_server_error(format!( @@ -113,7 +127,13 @@ fn try_proposer_duties_from_cache( .get_epoch::(dependent_root, request_epoch) .cloned() .map(|indices| { - convert_to_api_response(chain, request_epoch, dependent_root, indices.to_vec()) + convert_to_api_response( + chain, + request_epoch, + dependent_root, + execution_optimistic, + indices.to_vec(), + ) }) .transpose() } @@ -132,8 
+152,9 @@ fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { - let (indices, dependent_root, fork) = compute_proposer_duties_from_head(current_epoch, chain) - .map_err(warp_utils::reject::beacon_chain_error)?; + let (indices, dependent_root, execution_status, fork) = + compute_proposer_duties_from_head(current_epoch, chain) + .map_err(warp_utils::reject::beacon_chain_error)?; // Prime the proposer shuffling cache with the newly-learned value. chain @@ -143,7 +164,13 @@ fn compute_and_cache_proposer_duties( .map_err(BeaconChainError::from) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(chain, current_epoch, dependent_root, indices) + convert_to_api_response( + chain, + current_epoch, + dependent_root, + execution_status.is_optimistic_or_invalid(), + indices, + ) } /// Compute some proposer duties by reading a `BeaconState` from disk, completely ignoring the @@ -154,27 +181,36 @@ fn compute_historic_proposer_duties( ) -> Result { // If the head is quite old then it might still be relevant for a historical request. // - // Use the `with_head` function to read & clone in a single call to avoid race conditions. - let state_opt = chain - .with_head(|head| { - if head.beacon_state.current_epoch() <= epoch { - Ok(Some((head.beacon_state_root(), head.beacon_state.clone()))) - } else { - Ok(None) - } - }) - .map_err(warp_utils::reject::beacon_chain_error)?; - - let state = if let Some((state_root, mut state)) = state_opt { - // If we've loaded the head state it might be from a previous epoch, ensure it's in a - // suitable epoch. - ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec) + // Avoid holding the `cached_head` longer than necessary. 
+ let state_opt = { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() .map_err(warp_utils::reject::beacon_chain_error)?; - state - } else { - StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + let head = &cached_head.snapshot; + + if head.beacon_state.current_epoch() <= epoch { + Some(( + head.beacon_state_root(), + head.beacon_state.clone(), + execution_status.is_optimistic_or_invalid(), + )) + } else { + None + } }; + let (state, execution_optimistic) = + if let Some((state_root, mut state, execution_optimistic)) = state_opt { + // If we've loaded the head state it might be from a previous epoch, ensure it's in a + // suitable epoch. + ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec) + .map_err(warp_utils::reject::beacon_chain_error)?; + (state, execution_optimistic) + } else { + StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)? + }; + // Ensure the state lookup was correct. 
if state.current_epoch() != epoch { return Err(warp_utils::reject::custom_server_error(format!( @@ -196,7 +232,7 @@ fn compute_historic_proposer_duties( .map_err(BeaconChainError::from) .map_err(warp_utils::reject::beacon_chain_error)?; - convert_to_api_response(chain, epoch, dependent_root, indices) + convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices) } /// Converts the internal representation of proposer duties into one that is compatible with the @@ -205,6 +241,7 @@ fn convert_to_api_response( chain: &BeaconChain, epoch: Epoch, dependent_root: Hash256, + execution_optimistic: bool, indices: Vec, ) -> Result { let index_to_pubkey_map = chain @@ -239,6 +276,7 @@ fn convert_to_api_response( } else { Ok(api_types::DutiesResponse { dependent_root, + execution_optimistic: Some(execution_optimistic), data: proposer_data, }) } diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs new file mode 100644 index 0000000000..60ca8f2328 --- /dev/null +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -0,0 +1,176 @@ +use crate::metrics; +use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use slog::{crit, error, info, warn, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tree_hash::TreeHash; +use types::{ + BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, + SignedBeaconBlock, +}; +use warp::Rejection; + +/// Handles a request from the HTTP API for full blocks. +pub async fn publish_block( + block: Arc>, + chain: Arc>, + network_tx: &UnboundedSender>, + log: Logger, +) -> Result<(), Rejection> { + let seen_timestamp = timestamp_now(); + + // Send the block, regardless of whether or not it is valid. 
The API + // specification is very clear that this is the desired behaviour. + crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + + // Determine the delay after the start of the slot, register it with metrics. + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); + + match chain + .process_block(block.clone(), CountUnrealized::True) + .await + { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "block_delay" => ?delay, + "root" => format!("{}", root), + "proposer_index" => block.message().proposer_index(), + "slot" => block.slot(), + ); + + // Notify the validator monitor. + chain.validator_monitor.read().register_api_block( + seen_timestamp, + block.message(), + root, + &chain.slot_clock, + ); + + // Update the head since it's likely this block will become the new + // head. + chain.recompute_head_at_current_slot().await; + + // Perform some logging to inform users if their blocks are being produced + // late. 
+ // + // Check to see the thresholds are non-zero to avoid logging errors with small + // slot times (e.g., during testing) + let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let error_threshold = crit_threshold / 2; + if delay >= crit_threshold { + crit!( + log, + "Block was broadcast too late"; + "msg" => "system may be overloaded, block likely to be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } else if delay >= error_threshold { + error!( + log, + "Block broadcast was delayed"; + "msg" => "system may be overloaded, block may be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } + + Ok(()) + } + Err(BlockError::BlockIsAlreadyKnown) => { + info!( + log, + "Block from HTTP API already known"; + "block" => ?block.canonical_root(), + "slot" => block.slot(), + ); + Ok(()) + } + Err(BlockError::RepeatProposal { proposer, slot }) => { + warn!( + log, + "Block ignored due to repeat proposal"; + "msg" => "this can happen when a VC uses fallback BNs. \ + whilst this is not necessarily an error, it can indicate issues with a BN \ + or between the VC and BN.", + "slot" => slot, + "proposer" => proposer, + ); + Ok(()) + } + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } +} + +/// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full +/// blocks before publishing. +pub async fn publish_blinded_block( + block: SignedBeaconBlock>, + chain: Arc>, + network_tx: &UnboundedSender>, + log: Logger, +) -> Result<(), Rejection> { + let full_block = reconstruct_block(chain.clone(), block, log.clone()).await?; + publish_block::(Arc::new(full_block), chain, network_tx, log).await +} + +/// Deconstruct the given blinded block, and construct a full block. 
This attempts to use the +/// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve +/// the full payload. +async fn reconstruct_block( + chain: Arc>, + block: SignedBeaconBlock>, + log: Logger, +) -> Result>, Rejection> { + let full_payload = if let Ok(payload_header) = block.message().body().execution_payload() { + let el = chain.execution_layer.as_ref().ok_or_else(|| { + warp_utils::reject::custom_server_error("Missing execution layer".to_string()) + })?; + + // If the execution block hash is zero, use an empty payload. + let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { + ExecutionPayload::default() + // If we already have an execution payload with this transactions root cached, use it. + } else if let Some(cached_payload) = + el.get_payload_by_root(&payload_header.tree_hash_root()) + { + info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash); + cached_payload + // Otherwise, this means we are attempting a blind block proposal. 
+ } else { + let full_payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Blind block proposal failed: {:?}", + e + )) + })?; + info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); + full_payload + }; + + Some(full_payload) + } else { + None + }; + + block.try_into_full_block(full_payload).ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) + }) +} diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 95c049d997..9fe93e7abf 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -1,14 +1,17 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use crate::ExecutionOptimistic; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::StateId as CoreStateId; +use std::fmt; use std::str::FromStr; -use types::{BeaconState, EthSpec, Fork, Hash256, Slot}; +use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot}; /// Wraps `eth2::types::StateId` and provides common state-access functionality. E.g., reading /// states or parts of states from the database. 
-pub struct StateId(CoreStateId); +#[derive(Debug)] +pub struct StateId(pub CoreStateId); impl StateId { - pub fn slot(slot: Slot) -> Self { + pub fn from_slot(slot: Slot) -> Self { Self(CoreStateId::Slot(slot)) } @@ -16,62 +19,125 @@ impl StateId { pub fn root( &self, chain: &BeaconChain, - ) -> Result { - let slot = match &self.0 { + ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> { + let (slot, execution_optimistic) = match &self.0 { CoreStateId::Head => { - return chain - .head_info() - .map(|head| head.state_root) - .map_err(warp_utils::reject::beacon_chain_error) + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok(( + cached_head.head_state_root(), + execution_status.is_optimistic_or_invalid(), + )); } - CoreStateId::Genesis => return Ok(chain.genesis_state_root), - CoreStateId::Finalized => chain.head_info().map(|head| { - head.finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - }), - CoreStateId::Justified => chain.head_info().map(|head| { - head.current_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - }), - CoreStateId::Slot(slot) => Ok(*slot), - CoreStateId::Root(root) => return Ok(*root), - } - .map_err(warp_utils::reject::beacon_chain_error)?; + CoreStateId::Genesis => return Ok((chain.genesis_state_root, false)), + CoreStateId::Finalized => { + let finalized_checkpoint = + chain.canonical_head.cached_head().finalized_checkpoint(); + checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)? + } + CoreStateId::Justified => { + let justified_checkpoint = + chain.canonical_head.cached_head().justified_checkpoint(); + checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)? 
+ } + CoreStateId::Slot(slot) => ( + *slot, + chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?, + ), + CoreStateId::Root(root) => { + if let Some(hot_summary) = chain + .store + .load_hot_state_summary(root) + .map_err(BeaconChainError::DBError) + .map_err(warp_utils::reject::beacon_chain_error)? + { + let execution_optimistic = chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic)); + } else if let Some(_cold_state_slot) = chain + .store + .load_cold_state_slot(root) + .map_err(BeaconChainError::DBError) + .map_err(warp_utils::reject::beacon_chain_error)? + { + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let finalized_root = fork_choice + .cached_fork_choice_view() + .finalized_checkpoint + .root; + let execution_optimistic = fork_choice + .is_optimistic_or_invalid_block_no_fallback(&finalized_root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok((*root, execution_optimistic)); + } else { + return Err(warp_utils::reject::custom_not_found(format!( + "beacon state for state root {}", + root + ))); + } + } + }; - chain + let root = chain .state_root_at_slot(slot) .map_err(warp_utils::reject::beacon_chain_error)? .ok_or_else(|| { warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) - }) + })?; + + Ok((root, execution_optimistic)) } /// Return the `fork` field of the state identified by `self`. + /// Also returns the `execution_optimistic` value of the state. 
+ pub fn fork_and_execution_optimistic( + &self, + chain: &BeaconChain, + ) -> Result<(Fork, bool), warp::Rejection> { + self.map_state_and_execution_optimistic(chain, |state, execution_optimistic| { + Ok((state.fork(), execution_optimistic)) + }) + } + + /// Convenience function to compute `fork` when `execution_optimistic` isn't desired. pub fn fork( &self, chain: &BeaconChain, ) -> Result { - self.map_state(chain, |state| Ok(state.fork())) + self.fork_and_execution_optimistic(chain) + .map(|(fork, _)| fork) } /// Return the `BeaconState` identified by `self`. pub fn state( &self, chain: &BeaconChain, - ) -> Result, warp::Rejection> { - let (state_root, slot_opt) = match &self.0 { + ) -> Result<(BeaconState, ExecutionOptimistic), warp::Rejection> { + let ((state_root, execution_optimistic), slot_opt) = match &self.0 { CoreStateId::Head => { - return chain - .head_beacon_state() - .map_err(warp_utils::reject::beacon_chain_error) + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + return Ok(( + cached_head.snapshot.beacon_state.clone(), + execution_status.is_optimistic_or_invalid(), + )); } CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)), _ => (self.root(chain)?, None), }; - chain + let state = chain .get_state(&state_root, slot_opt) .map_err(warp_utils::reject::beacon_chain_error) .and_then(|opt| { @@ -81,13 +147,17 @@ impl StateId { state_root )) }) - }) + })?; + + Ok((state, execution_optimistic)) } + /* /// Map a function across the `BeaconState` identified by `self`. /// /// This function will avoid instantiating/copying a new state when `self` points to the head /// of the chain. 
+ #[allow(dead_code)] pub fn map_state( &self, chain: &BeaconChain, @@ -103,6 +173,36 @@ impl StateId { _ => func(&self.state(chain)?), } } + */ + + /// Functions the same as `map_state` but additionally computes the value of + /// `execution_optimistic` of the state identified by `self`. + /// + /// This is to avoid re-instantiating `state` unnecessarily. + pub fn map_state_and_execution_optimistic( + &self, + chain: &BeaconChain, + func: F, + ) -> Result + where + F: Fn(&BeaconState, bool) -> Result, + { + let (state, execution_optimistic) = match &self.0 { + CoreStateId::Head => { + let (head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + return func( + &head.snapshot.beacon_state, + execution_status.is_optimistic_or_invalid(), + ); + } + _ => self.state(chain)?, + }; + + func(&state, execution_optimistic) + } } impl FromStr for StateId { @@ -112,3 +212,35 @@ impl FromStr for StateId { CoreStateId::from_str(s).map(Self) } } + +impl fmt::Display for StateId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// Returns the first slot of the checkpoint's `epoch` and the execution status of the checkpoint's +/// `root`. +pub fn checkpoint_slot_and_execution_optimistic( + chain: &BeaconChain, + checkpoint: Checkpoint, +) -> Result<(Slot, ExecutionOptimistic), warp::reject::Rejection> { + let slot = checkpoint.epoch.start_slot(T::EthSpec::slots_per_epoch()); + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + let finalized_checkpoint = fork_choice.cached_fork_choice_view().finalized_checkpoint; + + // If the checkpoint is pre-finalization, just use the optimistic status of the finalized + // block. 
+ let root = if checkpoint.epoch < finalized_checkpoint.epoch { + &finalized_checkpoint.root + } else { + &checkpoint.root + }; + + let execution_optimistic = fork_choice + .is_optimistic_or_invalid_block_no_fallback(root) + .map_err(BeaconChainError::ForkChoiceError) + .map_err(warp_utils::reject::beacon_chain_error)?; + + Ok((slot, execution_optimistic)) +} diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 3ebc3c4ec8..a6acf308fa 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -11,7 +11,7 @@ use beacon_chain::{ use eth2::types::{self as api_types}; use lighthouse_network::PubsubMessage; use network::NetworkMessage; -use slog::{error, warn, Logger}; +use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use std::cmp::max; use std::collections::HashMap; @@ -22,7 +22,7 @@ use types::{ }; /// The struct that is returned to the requesting HTTP client. -type SyncDuties = api_types::GenericResponse>; +type SyncDuties = api_types::ExecutionOptimisticResponse>; /// Handles a request from the HTTP API for sync committee duties. pub fn sync_committee_duties( @@ -34,14 +34,20 @@ pub fn sync_committee_duties( altair_fork_epoch } else { // Empty response for networks with Altair disabled. - return Ok(convert_to_response(vec![])); + return Ok(convert_to_response(vec![], false)); }; + // Even when computing duties from state, any block roots pulled using the request epoch are + // still dependent on the head. So using `is_optimistic_head` is fine for both cases. + let execution_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + // Try using the head's sync committees to satisfy the request. This should be sufficient for // the vast majority of requests. 
Rather than checking if we think the request will succeed in a // way prone to data races, we attempt the request immediately and check the error code. match chain.sync_committee_duties_from_head(request_epoch, request_indices) { - Ok(duties) => return Ok(convert_to_response(duties)), + Ok(duties) => return Ok(convert_to_response(duties, execution_optimistic)), Err(BeaconChainError::SyncDutiesError(BeaconStateError::SyncCommitteeNotKnown { .. })) @@ -60,7 +66,7 @@ pub fn sync_committee_duties( )), e => warp_utils::reject::beacon_chain_error(e), })?; - Ok(convert_to_response(duties)) + Ok(convert_to_response(duties, execution_optimistic)) } /// Slow path for duties: load a state and use it to compute the duties. @@ -117,8 +123,9 @@ fn duties_from_state_load( } } -fn convert_to_response(duties: Vec>) -> SyncDuties { +fn convert_to_response(duties: Vec>, execution_optimistic: bool) -> SyncDuties { api_types::GenericResponse::from(duties.into_iter().flatten().collect::>()) + .add_execution_optimistic(execution_optimistic) } /// Receive sync committee duties, storing them in the pools & broadcasting them. @@ -182,6 +189,24 @@ pub fn process_sync_committee_signatures( verified_for_pool = Some(verified); } + // If this validator has already published a sync message, just ignore this message + // without returning an error. + // + // This is likely to happen when a VC uses fallback BNs. If the first BN publishes + // the message and then fails to respond in a timely fashion then the VC will move + // to the second BN. The BN will then report that this message has already been + // seen, which is not actually an error as far as the network or user are concerned. 
+ Err(SyncVerificationError::PriorSyncCommitteeMessageKnown { + validator_index, + slot, + }) => { + debug!( + log, + "Ignoring already-known sync message"; + "slot" => slot, + "validator_index" => validator_index, + ); + } Err(e) => { error!( log, @@ -276,6 +301,16 @@ pub fn process_signed_contribution_and_proofs( // If we already know the contribution, don't broadcast it or attempt to // further verify it. Return success. Err(SyncVerificationError::SyncContributionAlreadyKnown(_)) => continue, + // If we've already seen this aggregator produce an aggregate, just + // skip this one. + // + // We're likely to see this with VCs that use fallback BNs. The first + // BN might time-out *after* publishing the aggregate and then the + // second BN will indicate it's already seen the aggregate. + // + // There's no actual error for the user or the network since the + // aggregate has been successfully published by some other node. + Err(SyncVerificationError::AggregatorAlreadyKnown(_)) => continue, Err(e) => { error!( log, diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index 48dfc17ffa..917e85e649 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -16,7 +16,10 @@ fn end_of_epoch_state( chain: &BeaconChain, ) -> Result, warp::reject::Rejection> { let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch()); - StateId::slot(target_slot).state(chain) + // The execution status is not returned, any functions which rely upon this method might return + // optimistic information without explicitly declaring so. + let (state, _execution_status) = StateId::from_slot(target_slot).state(chain)?; + Ok(state) } /// Generate an `EpochProcessingSummary` for `state`. 
diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 854ef0c858..87ba3a4663 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,4 +1,6 @@ -use crate::api_types::{EndpointVersion, ForkVersionedResponse}; +use crate::api_types::{ + EndpointVersion, ExecutionOptimisticForkVersionedResponse, ForkVersionedResponse, +}; use eth2::CONSENSUS_VERSION_HEADER; use serde::Serialize; use types::{ForkName, InconsistentFork}; @@ -25,6 +27,26 @@ pub fn fork_versioned_response( }) } +pub fn execution_optimistic_fork_versioned_response( + endpoint_version: EndpointVersion, + fork_name: ForkName, + execution_optimistic: bool, + data: T, +) -> Result, warp::reject::Rejection> { + let fork_name = if endpoint_version == V1 { + None + } else if endpoint_version == V2 { + Some(fork_name) + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + Ok(ExecutionOptimisticForkVersionedResponse { + version: fork_name, + execution_optimistic: Some(execution_optimistic), + data, + }) +} + /// Add the `Eth-Consensus-Version` header to a response. 
pub fn add_consensus_version_header(reply: T, fork_name: ForkName) -> WithHeader { reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()) diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 06466c43bb..032e1346fb 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -11,14 +11,14 @@ use lighthouse_network::{ types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager, }; -use network::NetworkMessage; +use network::{NetworkReceivers, NetworkSenders}; use sensitive_url::SensitiveUrl; use slog::Logger; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::Duration; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::oneshot; use types::{ChainSpec, EthSpec}; pub const TCP_PORT: u16 = 42; @@ -30,7 +30,7 @@ pub const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000"; pub struct InteractiveTester { pub harness: BeaconChainHarness>, pub client: BeaconNodeHttpClient, - pub network_rx: mpsc::UnboundedReceiver>, + pub network_rx: NetworkReceivers, _server_shutdown: oneshot::Sender<()>, } @@ -41,7 +41,7 @@ pub struct ApiServer> { pub server: SFut, pub listening_socket: SocketAddr, pub shutdown_tx: oneshot::Sender<()>, - pub network_rx: tokio::sync::mpsc::UnboundedReceiver>, + pub network_rx: NetworkReceivers, pub local_enr: Enr, pub external_peer_id: PeerId, } @@ -87,7 +87,17 @@ pub async fn create_api_server( chain: Arc>, log: Logger, ) -> ApiServer> { - let (network_tx, network_rx) = mpsc::unbounded_channel(); + // Get a random unused port. 
+ let port = unused_port::unused_tcp_port().unwrap(); + create_api_server_on_port(chain, log, port).await +} + +pub async fn create_api_server_on_port( + chain: Arc>, + log: Logger, + port: u16, +) -> ApiServer> { + let (network_senders, network_receivers) = NetworkSenders::new(); // Default metadata let meta_data = MetaData::V2(MetaDataV2 { @@ -129,14 +139,14 @@ pub async fn create_api_server( config: Config { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - listen_port: 0, + listen_port: port, allow_origin: None, - serve_legacy_spec: true, tls_config: None, allow_sync_stalled: false, + spec_fork_name: None, }, chain: Some(chain.clone()), - network_tx: Some(network_tx), + network_senders: Some(network_senders), network_globals: Some(network_globals), eth1_service: Some(eth1_service), log, @@ -153,7 +163,7 @@ pub async fn create_api_server( server, listening_socket, shutdown_tx, - network_rx, + network_rx: network_receivers, local_enr: enr, external_peer_id: peer_id, } diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 6b4f79fa5d..942a1167c2 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -45,6 +45,7 @@ async fn sync_committee_duties_across_fork() { genesis_state_root, &all_validators, ) + .await .unwrap(); harness.advance_slot(); @@ -61,6 +62,7 @@ async fn sync_committee_duties_across_fork() { let state_root = state.canonical_root(); harness .add_attested_block_at_slot(fork_slot, state, state_root, &all_validators) + .await .unwrap(); assert_eq!( @@ -244,6 +246,7 @@ async fn sync_committee_indices_across_fork() { genesis_state_root, &all_validators, ) + .await .unwrap(); harness.advance_slot(); @@ -277,6 +280,7 @@ async fn sync_committee_indices_across_fork() { let state_root = state.canonical_root(); harness .add_attested_block_at_slot(fork_slot + 1, state, state_root, &all_validators) + .await .unwrap(); let current_period = 
fork_epoch.sync_committee_period(&spec).unwrap(); diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 8b12aa4a5b..3327093d09 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -47,11 +47,13 @@ pub async fn fork_choice_before_proposal() { // Create some chain depth. harness.advance_slot(); - harness.extend_chain( - num_initial as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + num_initial as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; // We set up the following block graph, where B is a block that is temporarily orphaned by C, // but is then reinstated and built upon by D. @@ -64,8 +66,8 @@ pub async fn fork_choice_before_proposal() { let slot_d = slot_a + 3; let state_a = harness.get_current_state(); - let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b); - let block_root_b = harness.process_block(slot_b, block_b).unwrap(); + let (block_b, state_b) = harness.make_block(state_a.clone(), slot_b).await; + let block_root_b = harness.process_block(slot_b, block_b).await.unwrap(); // Create attestations to B but keep them in reserve until after C has been processed. let attestations_b = harness.make_attestations( @@ -76,8 +78,11 @@ pub async fn fork_choice_before_proposal() { slot_b, ); - let (block_c, state_c) = harness.make_block(state_a, slot_c); - let block_root_c = harness.process_block(slot_c, block_c.clone()).unwrap(); + let (block_c, state_c) = harness.make_block(state_a, slot_c).await; + let block_root_c = harness + .process_block(slot_c, block_c.clone()) + .await + .unwrap(); // Create attestations to C from a small number of validators and process them immediately. 
let attestations_c = harness.make_attestations( @@ -94,7 +99,7 @@ pub async fn fork_choice_before_proposal() { // Due to proposer boost, the head should be C during slot C. assert_eq!( - harness.chain.head_info().unwrap().block_root, + harness.chain.canonical_head.cached_head().head_block_root(), block_root_c.into() ); @@ -102,7 +107,7 @@ pub async fn fork_choice_before_proposal() { // Manually prod the per-slot task, because the slot timer doesn't run in the background in // these tests. harness.advance_slot(); - harness.chain.per_slot_task(); + harness.chain.per_slot_task().await; let proposer_index = state_b .get_beacon_proposer_index(slot_d, &harness.chain.spec) @@ -119,7 +124,7 @@ pub async fn fork_choice_before_proposal() { // Head is now B. assert_eq!( - harness.chain.head_info().unwrap().block_root, + harness.chain.canonical_head.cached_head().head_block_root(), block_root_b.into() ); // D's parent is B. diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 5f53a96156..ca240e64d2 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,4 +1,4 @@ -use crate::common::{create_api_server, ApiServer}; +use crate::common::{create_api_server, create_api_server_on_port, ApiServer}; use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, @@ -8,25 +8,30 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::*, + types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; +use execution_layer::test_utils::Operation; +use execution_layer::test_utils::TestingBuilder; +use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; +use http_api::{BlockId, StateId}; use 
lighthouse_network::{Enr, EnrExt, PeerId}; -use network::NetworkMessage; +use network::NetworkReceivers; +use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; use state_processing::per_slot_processing; use std::convert::TryInto; use std::sync::Arc; -use task_executor::test_utils::TestRuntime; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::oneshot; use tokio::time::Duration; use tree_hash::TreeHash; +use types::application_domain::ApplicationDomain; use types::{ - AggregateSignature, BeaconState, BitList, Domain, EthSpec, Hash256, Keypair, MainnetEthSpec, - RelativeEpoch, SelectionProof, SignedRoot, Slot, + AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, Hash256, Keypair, + MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, }; type E = MainnetEthSpec; @@ -50,6 +55,7 @@ const SKIPPED_SLOTS: &[u64] = &[ ]; struct ApiTester { + harness: Arc>>, chain: Arc>>, client: BeaconNodeHttpClient, next_block: SignedBeaconBlock, @@ -60,11 +66,10 @@ struct ApiTester { proposer_slashing: ProposerSlashing, voluntary_exit: SignedVoluntaryExit, _server_shutdown: oneshot::Sender<()>, - validator_keypairs: Vec, - network_rx: mpsc::UnboundedReceiver>, + network_rx: NetworkReceivers, local_enr: Enr, external_peer_id: PeerId, - _runtime: TestRuntime, + mock_builder: Option>>, } impl ApiTester { @@ -75,12 +80,32 @@ impl ApiTester { Self::new_from_spec(spec).await } + pub async fn new_with_hard_forks(altair: bool, bellatrix: bool) -> Self { + let mut spec = E::default_spec(); + spec.shard_committee_period = 2; + // Set whether the chain has undergone each hard fork. 
+ if altair { + spec.altair_fork_epoch = Some(Epoch::new(0)); + } + if bellatrix { + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + } + Self::new_from_spec(spec).await + } + pub async fn new_from_spec(spec: ChainSpec) -> Self { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .spec(spec.clone()) - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); + // Get a random unused port + let port = unused_port::unused_tcp_port().unwrap(); + let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); + + let harness = Arc::new( + BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec.clone()) + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .mock_execution_layer_with_builder(beacon_url.clone()) + .build(), + ); harness.advance_slot(); @@ -88,17 +113,19 @@ impl ApiTester { let slot = harness.chain.slot().unwrap().as_u64(); if !SKIPPED_SLOTS.contains(&slot) { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; } harness.advance_slot(); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( harness.chain.slot().unwrap(), @@ -106,12 +133,14 @@ impl ApiTester { "precondition: current slot is one after head" ); - let (next_block, _next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, _next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; // `make_block` adds random graffiti, so this will produce an alternate block - let (reorg_block, _reorg_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (reorg_block, _reorg_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let 
head_state_root = head.beacon_state_root(); let attestations = harness @@ -162,15 +191,19 @@ impl ApiTester { let chain = harness.chain.clone(); assert_eq!( - chain.head_info().unwrap().finalized_checkpoint.epoch, + chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch, 2, "precondition: finality" ); assert_eq!( chain - .head_info() - .unwrap() - .current_justified_checkpoint + .canonical_head + .cached_head() + .justified_checkpoint() .epoch, 3, "precondition: justification" @@ -180,26 +213,30 @@ impl ApiTester { let ApiServer { server, - listening_socket, + listening_socket: _, shutdown_tx, network_rx, local_enr, external_peer_id, - } = create_api_server(chain.clone(), log).await; + } = create_api_server_on_port(chain.clone(), log, port).await; harness.runtime.task_executor.spawn(server, "api_server"); let client = BeaconNodeHttpClient::new( - SensitiveUrl::parse(&format!( - "http://{}:{}", - listening_socket.ip(), - listening_socket.port() - )) - .unwrap(), + beacon_url, Timeouts::set_all(Duration::from_secs(SECONDS_PER_SLOT)), ); + let builder_ref = harness.mock_builder.as_ref().unwrap().clone(); + harness.runtime.task_executor.spawn( + async move { builder_ref.run().await }, + "mock_builder_server", + ); + + let mock_builder = harness.mock_builder.clone(); + Self { + harness, chain, client, next_block, @@ -210,31 +247,34 @@ impl ApiTester { proposer_slashing, voluntary_exit, _server_shutdown: shutdown_tx, - validator_keypairs: harness.validator_keypairs, network_rx, local_enr, external_peer_id, - _runtime: harness.runtime, + mock_builder, } } pub async fn new_from_genesis() -> Self { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .default_spec() - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); + let harness = Arc::new( + BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .deterministic_keypairs(VALIDATOR_COUNT) + .fresh_ephemeral_store() + .build(), + ); harness.advance_slot(); 
- let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); - let (next_block, _next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, _next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; // `make_block` adds random graffiti, so this will produce an alternate block - let (reorg_block, _reorg_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (reorg_block, _reorg_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -279,6 +319,7 @@ impl ApiTester { ); Self { + harness, chain, client, next_block, @@ -289,14 +330,34 @@ impl ApiTester { proposer_slashing, voluntary_exit, _server_shutdown: shutdown_tx, - validator_keypairs: harness.validator_keypairs, network_rx, local_enr, external_peer_id, - _runtime: harness.runtime, + mock_builder: None, } } + fn validator_keypairs(&self) -> &[Keypair] { + &self.harness.validator_keypairs + } + + pub async fn new_mev_tester() -> Self { + let tester = Self::new_with_hard_forks(true, true) + .await + .test_post_validator_register_validator() + .await; + // Make sure bids always meet the minimum threshold. 
+ tester + .mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_BUILDER_THRESHOLD_WEI, + ))); + tester + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -309,94 +370,47 @@ impl ApiTester { fn interesting_state_ids(&self) -> Vec { let mut ids = vec![ - StateId::Head, - StateId::Genesis, - StateId::Finalized, - StateId::Justified, - StateId::Slot(Slot::new(0)), - StateId::Slot(Slot::new(32)), - StateId::Slot(Slot::from(SKIPPED_SLOTS[0])), - StateId::Slot(Slot::from(SKIPPED_SLOTS[1])), - StateId::Slot(Slot::from(SKIPPED_SLOTS[2])), - StateId::Slot(Slot::from(SKIPPED_SLOTS[3])), - StateId::Root(Hash256::zero()), + StateId(CoreStateId::Head), + StateId(CoreStateId::Genesis), + StateId(CoreStateId::Finalized), + StateId(CoreStateId::Justified), + StateId(CoreStateId::Slot(Slot::new(0))), + StateId(CoreStateId::Slot(Slot::new(32))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[0]))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[1]))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[2]))), + StateId(CoreStateId::Slot(Slot::from(SKIPPED_SLOTS[3]))), + StateId(CoreStateId::Root(Hash256::zero())), ]; - ids.push(StateId::Root(self.chain.head_info().unwrap().state_root)); + ids.push(StateId(CoreStateId::Root( + self.chain.canonical_head.cached_head().head_state_root(), + ))); ids } fn interesting_block_ids(&self) -> Vec { let mut ids = vec![ - BlockId::Head, - BlockId::Genesis, - BlockId::Finalized, - BlockId::Justified, - BlockId::Slot(Slot::new(0)), - BlockId::Slot(Slot::new(32)), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[0])), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[1])), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[2])), - BlockId::Slot(Slot::from(SKIPPED_SLOTS[3])), - BlockId::Root(Hash256::zero()), + BlockId(CoreBlockId::Head), + BlockId(CoreBlockId::Genesis), + BlockId(CoreBlockId::Finalized), + BlockId(CoreBlockId::Justified), + 
BlockId(CoreBlockId::Slot(Slot::new(0))), + BlockId(CoreBlockId::Slot(Slot::new(32))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[0]))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[1]))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[2]))), + BlockId(CoreBlockId::Slot(Slot::from(SKIPPED_SLOTS[3]))), + BlockId(CoreBlockId::Root(Hash256::zero())), ]; - ids.push(BlockId::Root(self.chain.head_info().unwrap().block_root)); + ids.push(BlockId(CoreBlockId::Root( + self.chain.canonical_head.cached_head().head_block_root(), + ))); ids } - - fn get_state(&self, state_id: StateId) -> Option> { - match state_id { - StateId::Head => Some(self.chain.head().unwrap().beacon_state), - StateId::Genesis => self - .chain - .get_state(&self.chain.genesis_state_root, None) - .unwrap(), - StateId::Finalized => { - let finalized_slot = self - .chain - .head_info() - .unwrap() - .finalized_checkpoint - .epoch - .start_slot(E::slots_per_epoch()); - - let root = self - .chain - .state_root_at_slot(finalized_slot) - .unwrap() - .unwrap(); - - self.chain.get_state(&root, Some(finalized_slot)).unwrap() - } - StateId::Justified => { - let justified_slot = self - .chain - .head_info() - .unwrap() - .current_justified_checkpoint - .epoch - .start_slot(E::slots_per_epoch()); - - let root = self - .chain - .state_root_at_slot(justified_slot) - .unwrap() - .unwrap(); - - self.chain.get_state(&root, Some(justified_slot)).unwrap() - } - StateId::Slot(slot) => { - let root = self.chain.state_root_at_slot(slot).unwrap().unwrap(); - - self.chain.get_state(&root, Some(slot)).unwrap() - } - StateId::Root(root) => self.chain.get_state(&root, None).unwrap(), - } - } - pub async fn test_beacon_genesis(self) -> Self { let result = self.client.get_beacon_genesis().await.unwrap().data; - let state = self.chain.head().unwrap().beacon_state; + let state = &self.chain.head_snapshot().beacon_state; let expected = GenesisData { genesis_time: state.genesis_time(), genesis_validators_root: 
state.genesis_validators_root(), @@ -412,39 +426,15 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_beacon_states_root(state_id) + .get_beacon_states_root(state_id.0) .await .unwrap() .map(|res| res.data.root); - let expected = match state_id { - StateId::Head => Some(self.chain.head_info().unwrap().state_root), - StateId::Genesis => Some(self.chain.genesis_state_root), - StateId::Finalized => { - let finalized_slot = self - .chain - .head_info() - .unwrap() - .finalized_checkpoint - .epoch - .start_slot(E::slots_per_epoch()); - - self.chain.state_root_at_slot(finalized_slot).unwrap() - } - StateId::Justified => { - let justified_slot = self - .chain - .head_info() - .unwrap() - .current_justified_checkpoint - .epoch - .start_slot(E::slots_per_epoch()); - - self.chain.state_root_at_slot(justified_slot).unwrap() - } - StateId::Slot(slot) => self.chain.state_root_at_slot(slot).unwrap(), - StateId::Root(root) => Some(root), - }; + let expected = state_id + .root(&self.chain) + .ok() + .map(|(root, _execution_optimistic)| root); assert_eq!(result, expected, "{:?}", state_id); } @@ -456,12 +446,12 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_beacon_states_fork(state_id) + .get_beacon_states_fork(state_id.0) .await .unwrap() .map(|res| res.data); - let expected = self.get_state(state_id).map(|state| state.fork()); + let expected = state_id.fork(&self.chain).ok(); assert_eq!(result, expected, "{:?}", state_id); } @@ -473,18 +463,20 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_beacon_states_finality_checkpoints(state_id) + .get_beacon_states_finality_checkpoints(state_id.0) .await .unwrap() .map(|res| res.data); - let expected = self - .get_state(state_id) - .map(|state| FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), 
- finalized: state.finalized_checkpoint(), - }); + let expected = + state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }); assert_eq!(result, expected, "{:?}", state_id); } @@ -495,9 +487,9 @@ impl ApiTester { pub async fn test_beacon_states_validator_balances(self) -> Self { for state_id in self.interesting_state_ids() { for validator_indices in self.interesting_validator_indices() { - let state_opt = self.get_state(state_id); + let state_opt = state_id.state(&self.chain).ok(); let validators: Vec = match state_opt.as_ref() { - Some(state) => state.validators().clone().into(), + Some((state, _execution_optimistic)) => state.validators().clone().into(), None => vec![], }; let validator_index_ids = validator_indices @@ -520,7 +512,7 @@ impl ApiTester { let result_index_ids = self .client .get_beacon_states_validator_balances( - state_id, + state_id.0, Some(validator_index_ids.as_slice()), ) .await @@ -529,14 +521,14 @@ impl ApiTester { let result_pubkey_ids = self .client .get_beacon_states_validator_balances( - state_id, + state_id.0, Some(validator_pubkey_ids.as_slice()), ) .await .unwrap() .map(|res| res.data); - let expected = state_opt.map(|state| { + let expected = state_opt.map(|(state, _execution_optimistic)| { let mut validators = Vec::with_capacity(validator_indices.len()); for i in validator_indices { @@ -563,7 +555,10 @@ impl ApiTester { for state_id in self.interesting_state_ids() { for statuses in self.interesting_validator_statuses() { for validator_indices in self.interesting_validator_indices() { - let state_opt = self.get_state(state_id); + let state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); let validators: Vec = match state_opt.as_ref() { Some(state) => 
state.validators().clone().into(), None => vec![], @@ -588,7 +583,7 @@ impl ApiTester { let result_index_ids = self .client .get_beacon_states_validators( - state_id, + state_id.0, Some(validator_index_ids.as_slice()), None, ) @@ -599,7 +594,7 @@ impl ApiTester { let result_pubkey_ids = self .client .get_beacon_states_validators( - state_id, + state_id.0, Some(validator_pubkey_ids.as_slice()), None, ) @@ -650,7 +645,10 @@ impl ApiTester { pub async fn test_beacon_states_validator_id(self) -> Self { for state_id in self.interesting_state_ids() { - let state_opt = self.get_state(state_id); + let state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); let validators = match state_opt.as_ref() { Some(state) => state.validators().clone().into(), None => vec![], @@ -665,7 +663,7 @@ impl ApiTester { for validator_id in validator_ids { let result = self .client - .get_beacon_states_validator_id(state_id, validator_id) + .get_beacon_states_validator_id(state_id.0, validator_id) .await .unwrap() .map(|res| res.data); @@ -702,12 +700,15 @@ impl ApiTester { pub async fn test_beacon_states_committees(self) -> Self { for state_id in self.interesting_state_ids() { - let mut state_opt = self.get_state(state_id); + let mut state_opt = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch()); let results = self .client - .get_beacon_states_committees(state_id, None, None, epoch_opt) + .get_beacon_states_committees(state_id.0, None, None, epoch_opt) .await .unwrap() .map(|res| res.data); @@ -744,31 +745,6 @@ impl ApiTester { self } - fn get_block_root(&self, block_id: BlockId) -> Option { - match block_id { - BlockId::Head => Some(self.chain.head_info().unwrap().block_root), - BlockId::Genesis => Some(self.chain.genesis_block_root), - BlockId::Finalized => Some(self.chain.head_info().unwrap().finalized_checkpoint.root), - BlockId::Justified => 
Some( - self.chain - .head_info() - .unwrap() - .current_justified_checkpoint - .root, - ), - BlockId::Slot(slot) => self - .chain - .block_root_at_slot(slot, WhenSlotSkipped::None) - .unwrap(), - BlockId::Root(root) => Some(root), - } - } - - async fn get_block(&self, block_id: BlockId) -> Option> { - let root = self.get_block_root(block_id)?; - self.chain.get_block(&root).await.unwrap() - } - pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -846,14 +822,17 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_headers_block_id(block_id) + .get_beacon_headers_block_id(block_id.0) .await .unwrap() .map(|res| res.data); - let block_root_opt = self.get_block_root(block_id); + let block_root_opt = block_id + .root(&self.chain) + .ok() + .map(|(root, _execution_optimistic)| root); - if let BlockId::Slot(slot) = block_id { + if let CoreBlockId::Slot(slot) = block_id.0 { if block_root_opt.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -861,11 +840,11 @@ impl ApiTester { } } - let block_opt = if let Some(root) = block_root_opt { - self.chain.get_block(&root).await.unwrap() - } else { - None - }; + let block_opt = block_id + .full_block(&self.chain) + .await + .ok() + .map(|(block, _execution_optimistic)| block); if block_opt.is_none() && result.is_none() { continue; @@ -903,13 +882,16 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_blocks_root(block_id) + .get_beacon_blocks_root(block_id.0) .await .unwrap() .map(|res| res.data.root); - let expected = self.get_block_root(block_id); - if let BlockId::Slot(slot) = block_id { + let expected = block_id + .root(&self.chain) + .ok() + .map(|(root, _execution_optimistic)| root); + if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -928,7 +910,7 
@@ impl ApiTester { self.client.post_beacon_blocks(next_block).await.unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid blocks should be sent to network" ); @@ -942,7 +924,7 @@ impl ApiTester { assert!(self.client.post_beacon_blocks(&next_block).await.is_err()); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "invalid blocks should be sent to network" ); @@ -951,9 +933,13 @@ impl ApiTester { pub async fn test_beacon_blocks(self) -> Self { for block_id in self.interesting_block_ids() { - let expected = self.get_block(block_id).await; + let expected = block_id + .full_block(&self.chain) + .await + .ok() + .map(|(block, _execution_optimistic)| block); - if let BlockId::Slot(slot) = block_id { + if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -962,10 +948,10 @@ impl ApiTester { } // Check the JSON endpoint. - let json_result = self.client.get_beacon_blocks(block_id).await.unwrap(); + let json_result = self.client.get_beacon_blocks(block_id.0).await.unwrap(); if let (Some(json), Some(expected)) = (&json_result, &expected) { - assert_eq!(json.data, *expected, "{:?}", block_id); + assert_eq!(&json.data, expected.as_ref(), "{:?}", block_id); assert_eq!( json.version, Some(expected.fork_name(&self.chain.spec).unwrap()) @@ -978,23 +964,28 @@ impl ApiTester { // Check the SSZ endpoint. let ssz_result = self .client - .get_beacon_blocks_ssz(block_id, &self.chain.spec) + .get_beacon_blocks_ssz(block_id.0, &self.chain.spec) .await .unwrap(); - assert_eq!(ssz_result, expected, "{:?}", block_id); + assert_eq!( + ssz_result.as_ref(), + expected.as_ref().map(|b| b.as_ref()), + "{:?}", + block_id + ); // Check that the legacy v1 API still works but doesn't return a version field. 
- let v1_result = self.client.get_beacon_blocks_v1(block_id).await.unwrap(); + let v1_result = self.client.get_beacon_blocks_v1(block_id.0).await.unwrap(); if let (Some(v1_result), Some(expected)) = (&v1_result, &expected) { assert_eq!(v1_result.version, None); - assert_eq!(v1_result.data, *expected); + assert_eq!(&v1_result.data, expected.as_ref()); } else { assert_eq!(v1_result, None); assert_eq!(expected, None); } // Check that version headers are provided. - let url = self.client.get_beacon_blocks_path(block_id).unwrap(); + let url = self.client.get_beacon_blocks_path(block_id.0).unwrap(); let builders: Vec RequestBuilder> = vec![ |b| b, @@ -1029,17 +1020,18 @@ impl ApiTester { for block_id in self.interesting_block_ids() { let result = self .client - .get_beacon_blocks_attestations(block_id) + .get_beacon_blocks_attestations(block_id.0) .await .unwrap() .map(|res| res.data); - let expected = self - .get_block(block_id) - .await - .map(|block| block.message().body().attestations().clone().into()); + let expected = block_id.full_block(&self.chain).await.ok().map( + |(block, _execution_optimistic)| { + block.message().body().attestations().clone().into() + }, + ); - if let BlockId::Slot(slot) = block_id { + if let CoreBlockId::Slot(slot) = block_id.0 { if expected.is_none() { assert!(SKIPPED_SLOTS.contains(&slot.as_u64())); } else { @@ -1060,7 +1052,7 @@ impl ApiTester { .unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid attestation should be sent to network" ); @@ -1097,7 +1089,7 @@ impl ApiTester { } assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "if some attestations are valid, we should send them to the network" ); @@ -1127,7 +1119,7 @@ impl ApiTester { .unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid attester slashing should be sent to network" ); @@ 
-1144,7 +1136,7 @@ impl ApiTester { .unwrap_err(); assert!( - self.network_rx.recv().now_or_never().is_none(), + self.network_rx.network_recv.recv().now_or_never().is_none(), "invalid attester slashing should not be sent to network" ); @@ -1173,7 +1165,7 @@ impl ApiTester { .unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid proposer slashing should be sent to network" ); @@ -1190,7 +1182,7 @@ impl ApiTester { .unwrap_err(); assert!( - self.network_rx.recv().now_or_never().is_none(), + self.network_rx.network_recv.recv().now_or_never().is_none(), "invalid proposer slashing should not be sent to network" ); @@ -1219,7 +1211,7 @@ impl ApiTester { .unwrap(); assert!( - self.network_rx.recv().await.is_some(), + self.network_rx.network_recv.recv().await.is_some(), "valid exit should be sent to network" ); @@ -1236,7 +1228,7 @@ impl ApiTester { .unwrap_err(); assert!( - self.network_rx.recv().now_or_never().is_none(), + self.network_rx.network_recv.recv().now_or_never().is_none(), "invalid exit should not be sent to network" ); @@ -1272,10 +1264,13 @@ impl ApiTester { } pub async fn test_get_config_spec(self) -> Self { - let result = self.client.get_config_spec().await.unwrap().data; - - let mut expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec); - expected.make_backwards_compat(&self.chain.spec); + let result = self + .client + .get_config_spec::() + .await + .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .unwrap(); + let expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec, None); assert_eq!(result, expected); @@ -1314,11 +1309,12 @@ impl ApiTester { pub async fn test_get_node_syncing(self) -> Self { let result = self.client.get_node_syncing().await.unwrap().data; - let head_slot = self.chain.head_info().unwrap().slot; + let head_slot = self.chain.canonical_head.cached_head().head_slot(); let sync_distance = self.chain.slot().unwrap() - head_slot; let expected = 
SyncingData { is_syncing: false, + is_optimistic: Some(false), head_slot, sync_distance, }; @@ -1442,9 +1438,16 @@ impl ApiTester { pub async fn test_get_debug_beacon_states(self) -> Self { for state_id in self.interesting_state_ids() { - let result_json = self.client.get_debug_beacon_states(state_id).await.unwrap(); + let result_json = self + .client + .get_debug_beacon_states(state_id.0) + .await + .unwrap(); - let mut expected = self.get_state(state_id); + let mut expected = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); expected.as_mut().map(|state| state.drop_all_caches()); if let (Some(json), Some(expected)) = (&result_json, &expected) { @@ -1461,7 +1464,7 @@ impl ApiTester { // Check SSZ API. let result_ssz = self .client - .get_debug_beacon_states_ssz(state_id, &self.chain.spec) + .get_debug_beacon_states_ssz(state_id.0, &self.chain.spec) .await .unwrap(); assert_eq!(result_ssz, expected, "{:?}", state_id); @@ -1469,7 +1472,7 @@ impl ApiTester { // Check legacy v1 API. let result_v1 = self .client - .get_debug_beacon_states_v1(state_id) + .get_debug_beacon_states_v1(state_id.0) .await .unwrap(); @@ -1482,7 +1485,10 @@ impl ApiTester { } // Check that version headers are provided. 
- let url = self.client.get_debug_beacon_states_path(state_id).unwrap(); + let url = self + .client + .get_debug_beacon_states_path(state_id.0) + .unwrap(); let builders: Vec RequestBuilder> = vec![|b| b, |b| b.accept(Accept::Ssz)]; @@ -1528,7 +1534,7 @@ impl ApiTester { } fn validator_count(&self) -> usize { - self.chain.head().unwrap().beacon_state.validators().len() + self.chain.head_snapshot().beacon_state.validators().len() } fn interesting_validator_indices(&self) -> Vec> { @@ -1613,7 +1619,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); assert_eq!(results.dependent_root, dependent_root); @@ -1688,7 +1694,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); // Presently, the beacon chain harness never runs the code that primes the proposer // cache. If this changes in the future then we'll need some smarter logic here, but @@ -1760,6 +1766,7 @@ impl ApiTester { let expected = DutiesResponse { data: expected_duties, + execution_optimistic: Some(false), dependent_root, }; @@ -1816,7 +1823,7 @@ impl ApiTester { WhenSlotSkipped::Prev, ) .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + .unwrap_or(self.chain.head_beacon_block_root()); self.client .get_validator_duties_proposer(current_epoch) @@ -1870,7 +1877,7 @@ impl ApiTester { } pub async fn test_block_production(self) -> Self { - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; for _ in 0..E::slots_per_epoch() * 3 { @@ -1890,7 +1897,7 @@ impl ApiTester { let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); let sk = self - .validator_keypairs + .validator_keypairs() .iter() .find(|kp| kp.pk == proposer_pubkey) 
.map(|kp| kp.sk.clone()) @@ -1918,7 +1925,7 @@ impl ApiTester { self.client.post_beacon_blocks(&signed_block).await.unwrap(); - assert_eq!(self.chain.head_beacon_block().unwrap(), signed_block); + assert_eq!(self.chain.head_beacon_block().as_ref(), &signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } @@ -1949,7 +1956,7 @@ impl ApiTester { } pub async fn test_block_production_verify_randao_invalid(self) -> Self { - let fork = self.chain.head_info().unwrap().fork; + let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; for _ in 0..E::slots_per_epoch() { @@ -1969,7 +1976,7 @@ impl ApiTester { let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); let sk = self - .validator_keypairs + .validator_keypairs() .iter() .find(|kp| kp.pk == proposer_pubkey) .map(|kp| kp.sk.clone()) @@ -2031,8 +2038,177 @@ impl ApiTester { self } + pub async fn test_blinded_block_production>(&self) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + let block = self + .client + .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .await + .unwrap() + .data; + + let 
signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blinded_blocks(&signed_block) + .await + .unwrap(); + + // This converts the generic `Payload` to a concrete type for comparison. + let head_block = SignedBeaconBlock::from(signed_block.clone()); + assert_eq!(head_block, signed_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + } + + pub async fn test_blinded_block_production_no_verify_randao>( + self, + ) -> Self { + for _ in 0..E::slots_per_epoch() { + let slot = self.chain.slot().unwrap(); + + let block = self + .client + .get_validator_blinded_blocks_with_verify_randao::( + slot, + None, + None, + Some(false), + ) + .await + .unwrap() + .data; + assert_eq!(block.slot(), slot); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + + pub async fn test_blinded_block_production_verify_randao_invalid>( + self, + ) -> Self { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let bad_randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = (epoch + 1).signing_root(domain); + sk.sign(message).into() + }; + + // Check failure with no `verify_randao` passed. 
+ self.client + .get_validator_blinded_blocks::(slot, &bad_randao_reveal, None) + .await + .unwrap_err(); + + // Check failure with `verify_randao=true`. + self.client + .get_validator_blinded_blocks_with_verify_randao::( + slot, + Some(&bad_randao_reveal), + None, + Some(true), + ) + .await + .unwrap_err(); + + // Check failure with no randao reveal provided. + self.client + .get_validator_blinded_blocks_with_verify_randao::( + slot, None, None, None, + ) + .await + .unwrap_err(); + + // Check success with `verify_randao=false`. + let block = self + .client + .get_validator_blinded_blocks_with_verify_randao::( + slot, + Some(&bad_randao_reveal), + None, + Some(false), + ) + .await + .unwrap() + .data; + + assert_eq!(block.slot(), slot); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + pub async fn test_get_validator_attestation_data(self) -> Self { - let mut state = self.chain.head_beacon_state().unwrap(); + let mut state = self.chain.head_beacon_state_cloned(); let slot = state.slot(); state .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) @@ -2062,7 +2238,6 @@ impl ApiTester { let attestation = self .chain .head_beacon_block() - .unwrap() .message() .body() .attestations()[0] @@ -2090,7 +2265,7 @@ impl ApiTester { let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); - let mut head = self.chain.head().unwrap(); + let mut head = self.chain.head_snapshot().as_ref().clone(); while head.beacon_state.current_epoch() < epoch { per_slot_processing(&mut head.beacon_state, None, &self.chain.spec).unwrap(); } @@ -2106,7 +2281,7 @@ impl ApiTester { .client .post_validator_duties_attester( epoch, - (0..self.validator_keypairs.len() as u64) + (0..self.validator_keypairs().len() as u64) .collect::>() .as_slice(), ) @@ -2115,7 +2290,7 @@ impl ApiTester { .data; let (i, kp, duty, proof) = self - .validator_keypairs + .validator_keypairs() .iter() .enumerate() .find_map(|(i, kp)| { @@ -2187,7 +2362,7 @@ impl 
ApiTester { .await .unwrap(); - assert!(self.network_rx.recv().await.is_some()); + assert!(self.network_rx.network_recv.recv().await.is_some()); self } @@ -2202,7 +2377,7 @@ impl ApiTester { .await .unwrap_err(); - assert!(self.network_rx.recv().now_or_never().is_none()); + assert!(self.network_rx.network_recv.recv().now_or_never().is_none()); self } @@ -2221,11 +2396,845 @@ impl ApiTester { .await .unwrap(); - self.network_rx.recv().now_or_never().unwrap(); + self.network_rx + .validator_subscription_recv + .recv() + .now_or_never() + .unwrap(); self } + pub async fn test_post_validator_register_validator(self) -> Self { + let mut registrations = vec![]; + let mut fee_recipients = vec![]; + + let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch()); + let fork = Fork { + current_version: self.chain.spec.genesis_fork_version, + previous_version: self.chain.spec.genesis_fork_version, + epoch: genesis_epoch, + }; + + let expected_gas_limit = 11_111_111; + + for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { + let pubkey = keypair.pk.compress(); + let fee_recipient = Address::from_low_u64_be(val_index as u64); + + let data = ValidatorRegistrationData { + fee_recipient, + gas_limit: expected_gas_limit, + timestamp: 0, + pubkey, + }; + + let domain = self.chain.spec.get_domain( + genesis_epoch, + Domain::ApplicationMask(ApplicationDomain::Builder), + &fork, + Hash256::zero(), + ); + let message = data.signing_root(domain); + let signature = keypair.sk.sign(message); + + let signed = SignedValidatorRegistrationData { + message: data, + signature, + }; + + fee_recipients.push(fee_recipient); + registrations.push(signed); + } + + self.client + .post_validator_register_validator(®istrations) + .await + .unwrap(); + + for (val_index, (_, fee_recipient)) in self + .chain + .head_snapshot() + .beacon_state + .validators() + .into_iter() + .zip(fee_recipients.into_iter()) + .enumerate() + { + let actual = self + .chain + 
.execution_layer + .as_ref() + .unwrap() + .get_suggested_fee_recipient(val_index as u64) + .await; + assert_eq!(actual, fee_recipient); + } + + self + } + + pub async fn test_post_validator_register_validator_slashed(self) -> Self { + // slash a validator + self.client + .post_beacon_pool_attester_slashings(&self.attester_slashing) + .await + .unwrap(); + + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let mut registrations = vec![]; + let mut fee_recipients = vec![]; + + let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch()); + let fork = Fork { + current_version: self.chain.spec.genesis_fork_version, + previous_version: self.chain.spec.genesis_fork_version, + epoch: genesis_epoch, + }; + + let expected_gas_limit = 11_111_111; + + for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { + let pubkey = keypair.pk.compress(); + let fee_recipient = Address::from_low_u64_be(val_index as u64); + + let data = ValidatorRegistrationData { + fee_recipient, + gas_limit: expected_gas_limit, + timestamp: 0, + pubkey, + }; + + let domain = self.chain.spec.get_domain( + genesis_epoch, + Domain::ApplicationMask(ApplicationDomain::Builder), + &fork, + Hash256::zero(), + ); + let message = data.signing_root(domain); + let signature = keypair.sk.sign(message); + + let signed = SignedValidatorRegistrationData { + message: data, + signature, + }; + + fee_recipients.push(fee_recipient); + registrations.push(signed); + } + + self.client + .post_validator_register_validator(®istrations) + .await + .unwrap(); + + for (val_index, (_, fee_recipient)) in self + .chain + .head_snapshot() + .beacon_state + .validators() + .into_iter() + .zip(fee_recipients.into_iter()) + .enumerate() + { + let actual = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_suggested_fee_recipient(val_index as u64) + .await; + if val_index == 0 || val_index == 1 { + 
assert_eq!(actual, Address::from_low_u64_be(val_index as u64)); + } else { + assert_eq!(actual, fee_recipient); + } + } + + self + } + + // Helper function for tests that require a valid RANDAO signature. + async fn get_test_randao(&self, slot: Slot, epoch: Epoch) -> (u64, SignatureBytes) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + let (proposer_pubkey_bytes, proposer_index) = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| (duty.pubkey, duty.validator_index)) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = + self.chain + .spec + .get_domain(epoch, Domain::Randao, &fork, genesis_validators_root); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + (proposer_index, randao_reveal) + } + + pub async fn test_payload_respects_registration(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + assert_eq!(payload.execution_payload_header.gas_limit, 11_111_111); + + // If this cache is empty, it indicates fallback was not used, so the payload came from the + // mock builder. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_accepts_mutated_gas_limit(self) -> Self { + // Mutate gas limit. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::GasLimit(30_000_000)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + assert_eq!(payload.execution_payload_header.gas_limit, 30_000_000); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_accepts_changed_fee_recipient(self) -> Self { + let test_fee_recipient = "0x4242424242424242424242424242424242424242" + .parse::
() + .unwrap(); + + // Mutate fee recipient. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::FeeRecipient(test_fee_recipient)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.fee_recipient, + test_fee_recipient + ); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_rejects_invalid_parent_hash(self) -> Self { + let invalid_parent_hash = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate parent hash. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::ParentHash(invalid_parent_hash)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_parent_hash = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_hash; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.parent_hash, + expected_parent_hash + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_prev_randao(self) -> Self { + let invalid_prev_randao = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate prev randao. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::PrevRandao(invalid_prev_randao)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_prev_randao = self + .chain + .canonical_head + .cached_head() + .head_random() + .unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.prev_randao, + expected_prev_randao + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_block_number(self) -> Self { + let invalid_block_number = 2; + + // Mutate block number. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::BlockNumber(invalid_block_number)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_block_number = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_number + + 1; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.block_number, + expected_block_number + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_timestamp(self) -> Self { + let invalid_timestamp = 2; + + // Mutate timestamp. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Timestamp(invalid_timestamp)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let min_expected_timestamp = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .timestamp; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert!(payload.execution_payload_header.timestamp > min_expected_timestamp); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_signature(self) -> Self { + self.mock_builder + .as_ref() + .unwrap() + .builder + .invalid_signatures(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_chain_health_skips(self) -> Self { + let slot = self.chain.slot().unwrap(); + + // Since we are proposing this slot, start the count from the previous slot. + let prev_slot = slot - Slot::new(1); + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + let epoch = self.chain.epoch().unwrap(); + + // Inclusive here to make sure we advance one slot past the threshold. + for _ in (prev_slot - head_slot).as_usize()..=self.chain.config.builder_fallback_skips { + self.harness.advance_slot(); + } + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_chain_health_skips_per_epoch(self) -> Self { + // Fill an epoch with `builder_fallback_skips_per_epoch` skip slots. + for i in 0..E::slots_per_epoch() { + if i == 0 || i as usize > self.chain.config.builder_fallback_skips_per_epoch { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + } + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + + // Without proposing, advance into the next slot, this should make us cross the threshold + // number of skips, causing us to use the fallback. + self.harness.advance_slot(); + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + self + } + + pub async fn test_builder_chain_health_epochs_since_finalization(self) -> Self { + let skips = E::slots_per_epoch() + * self.chain.config.builder_fallback_epochs_since_finalization as u64; + + for _ in 0..skips { + self.harness.advance_slot(); + } + + // Fill the next epoch with blocks, should be enough to justify, not finalize. + for _ in 0..E::slots_per_epoch() { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this + // scenario starts at an epoch boundary). 
+ for _ in 0..E::slots_per_epoch() + 1 { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + + self + } + + pub async fn test_builder_chain_health_optimistic_head(self) -> Self { + // Make sure the next payload verification will return optimistic before advancing the chain. + self.harness.mock_execution_layer.as_ref().map(|el| { + el.server.all_payloads_syncing(true); + el + }); + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. 
+ assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + self + } + + pub async fn test_payload_rejects_inadequate_builder_threshold(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Value(Uint256::from( + DEFAULT_BUILDER_THRESHOLD_WEI - 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -2302,11 +3311,14 @@ impl ApiTester { for state_id in self.interesting_state_ids() { let result = self .client - .get_lighthouse_beacon_states_ssz(&state_id, &self.chain.spec) + .get_lighthouse_beacon_states_ssz(&state_id.0, &self.chain.spec) .await .unwrap(); - let mut expected = self.get_state(state_id); + let mut expected = state_id + .state(&self.chain) + .ok() + .map(|(state, _execution_optimistic)| state); expected.as_mut().map(|state| state.drop_all_caches()); assert_eq!(result, expected, "{:?}", state_id); @@ -2348,7 +3360,7 @@ impl ApiTester { pub async fn test_post_lighthouse_liveness(self) -> Self { let epoch = self.chain.epoch().unwrap(); - let head_state = self.chain.head_beacon_state().unwrap(); + let head_state = self.chain.head_beacon_state_cloned(); let indices = (0..head_state.validators().len()) .map(|i| i as u64) .collect::>(); @@ -2465,7 +3477,7 @@ impl 
ApiTester { let block_root = self.next_block.canonical_root(); // current_duty_dependent_root = block root because this is the first slot of the epoch - let current_duty_dependent_root = self.chain.head_beacon_block_root().unwrap(); + let current_duty_dependent_root = self.chain.head_beacon_block_root(); let current_slot = self.chain.slot().unwrap(); let next_slot = self.next_block.slot(); let finalization_distance = E::slots_per_epoch() * 2; @@ -2473,6 +3485,7 @@ impl ApiTester { let expected_block = EventKind::Block(SseBlock { block: block_root, slot: next_slot, + execution_optimistic: false, }); let expected_head = EventKind::Head(SseHead { @@ -2486,20 +3499,26 @@ impl ApiTester { .unwrap() .unwrap(), epoch_transition: true, + execution_optimistic: false, }); + let finalized_block_root = self + .chain + .block_root_at_slot(next_slot - finalization_distance, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let finalized_block = self + .chain + .get_blinded_block(&finalized_block_root) + .unwrap() + .unwrap(); + let finalized_state_root = finalized_block.state_root(); + let expected_finalized = EventKind::FinalizedCheckpoint(SseFinalizedCheckpoint { - block: self - .chain - .block_root_at_slot(next_slot - finalization_distance, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(), - state: self - .chain - .state_root_at_slot(next_slot - finalization_distance) - .unwrap() - .unwrap(), + block: finalized_block_root, + state: finalized_state_root, epoch: Epoch::new(3), + execution_optimistic: false, }); self.client @@ -2510,7 +3529,7 @@ impl ApiTester { let block_events = poll_events(&mut events_future, 3, Duration::from_millis(10000)).await; assert_eq!( block_events.as_slice(), - &[expected_block, expected_finalized, expected_head] + &[expected_block, expected_head, expected_finalized] ); // Test a reorg event @@ -2528,6 +3547,7 @@ impl ApiTester { new_head_block: self.reorg_block.canonical_root(), new_head_state: self.reorg_block.state_root(), epoch: 
self.next_block.slot().epoch(E::slots_per_epoch()), + execution_optimistic: false, }); self.client @@ -2594,6 +3614,7 @@ impl ApiTester { let expected_block = EventKind::Block(SseBlock { block: block_root, slot: next_slot, + execution_optimistic: false, }); let expected_head = EventKind::Head(SseHead { @@ -2603,6 +3624,7 @@ impl ApiTester { current_duty_dependent_root: self.chain.genesis_block_root, previous_duty_dependent_root: self.chain.genesis_block_root, epoch_transition: false, + execution_optimistic: false, }); self.client @@ -2615,6 +3637,40 @@ impl ApiTester { self } + + pub async fn test_check_optimistic_responses(&mut self) { + // Check responses are not optimistic. + let result = self + .client + .get_beacon_headers_block_id(CoreBlockId::Head) + .await + .unwrap() + .unwrap(); + + assert_eq!(result.execution_optimistic, Some(false)); + + // Change head to be optimistic. + self.chain + .canonical_head + .fork_choice_write_lock() + .proto_array_mut() + .core_proto_array_mut() + .nodes + .last_mut() + .map(|head_node| { + head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) + }); + + // Check responses are now optimistic. 
+ let result = self + .client + .get_beacon_headers_block_id(CoreBlockId::Head) + .await + .unwrap() + .unwrap(); + + assert_eq!(result.execution_optimistic, Some(true)); + } } async fn poll_events, eth2::Error>> + Unpin, T: EthSpec>( @@ -2897,6 +3953,72 @@ async fn block_production_verify_randao_invalid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_with_skip_slots_full_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_no_verify_randao_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_no_verify_randao::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_verify_randao_invalid_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_verify_randao_invalid::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_no_verify_randao::>() + .await; +} + +#[tokio::test(flavor = 
"multi_thread", worker_threads = 2)] +async fn blinded_block_production_verify_randao_invalid_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_verify_randao_invalid::>() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_attestation_data() { ApiTester::new() @@ -2973,6 +4095,126 @@ async fn get_validator_beacon_committee_subscriptions() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_validator() { + ApiTester::new() + .await + .test_post_validator_register_validator() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_validator_slashed() { + ApiTester::new() + .await + .test_post_validator_register_validator_slashed() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_valid() { + ApiTester::new_mev_tester() + .await + .test_payload_respects_registration() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_gas_limit_mutation() { + ApiTester::new_mev_tester() + .await + .test_payload_accepts_mutated_gas_limit() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_fee_recipient_mutation() { + ApiTester::new_mev_tester() + .await + .test_payload_accepts_changed_fee_recipient() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_parent_hash() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_parent_hash() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_prev_randao() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_prev_randao() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn 
get_blinded_block_invalid_block_number() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_block_number() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_timestamp() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_timestamp() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_signature() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_signature() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_skips() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips_per_epoch() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_skips_per_epoch() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_epochs_since_finalization() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_epochs_since_finalization() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_optimistic_head() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_optimistic_head() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_inadequate_builder_threshold() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_inadequate_builder_threshold() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() @@ -3004,3 +4246,11 @@ async fn lighthouse_endpoints() { .test_post_lighthouse_liveness() .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn optimistic_responses() { + ApiTester::new_with_hard_forks(true, true) + .await + 
.test_check_optimistic_responses() + .await; +} diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 5ed3614de6..c6ba530508 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -8,7 +8,6 @@ edition = "2021" discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } -hashset_delay = { path = "../../common/hashset_delay" } eth2_ssz_types = "0.2.2" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" @@ -38,11 +37,12 @@ directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" -prometheus-client = "0.15.0" +prometheus-client = "0.16.0" unused_port = { path = "../../common/unused_port" } +delay_map = "0.1.1" [dependencies.libp2p] -version = "0.43.0" +version = "0.45.1" default-features = false features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"] @@ -52,6 +52,8 @@ slog-async = "2.5.0" tempfile = "3.1.0" exit-future = "0.2.0" void = "1" +quickcheck = "0.9.2" +quickcheck_macros = "0.9.1" [features] libp2p-websocket = [] diff --git a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs b/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs index 93687e555b..4842605f7a 100644 --- a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs @@ -30,9 +30,9 @@ pub struct GossipCache { proposer_slashing: Option, /// Timeout for attester slashings. attester_slashing: Option, - /// Timeout for aggregated sync commitee signatures. + /// Timeout for aggregated sync committee signatures. signed_contribution_and_proof: Option, - /// Timeout for sync commitee messages. + /// Timeout for sync committee messages. 
sync_committee_message: Option, } @@ -51,9 +51,9 @@ pub struct GossipCacheBuilder { proposer_slashing: Option, /// Timeout for attester slashings. attester_slashing: Option, - /// Timeout for aggregated sync commitee signatures. + /// Timeout for aggregated sync committee signatures. signed_contribution_and_proof: Option, - /// Timeout for sync commitee messages. + /// Timeout for sync committee messages. sync_committee_message: Option, } @@ -101,13 +101,13 @@ impl GossipCacheBuilder { self } - /// Timeout for aggregated sync commitee signatures. + /// Timeout for aggregated sync committee signatures. pub fn signed_contribution_and_proof_timeout(mut self, timeout: Duration) -> Self { self.signed_contribution_and_proof = Some(timeout); self } - /// Timeout for sync commitee messages. + /// Timeout for sync committee messages. pub fn sync_committee_message_timeout(mut self, timeout: Duration) -> Self { self.sync_committee_message = Some(timeout); self diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index e67bb29de3..9c9e094db6 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -1006,9 +1006,6 @@ where proto, error, } => { - if matches!(error, RPCError::HandlerRejected) { - // this peer's request got canceled - } // Inform the peer manager of the error. // An inbound error here means we sent an error to the peer, or the stream // timed out. @@ -1068,11 +1065,33 @@ where // propagate the STATUS message upwards self.propagate_request(peer_request_id, peer_id, Request::Status(msg)) } - InboundRequest::BlocksByRange(req) => self.propagate_request( - peer_request_id, - peer_id, - Request::BlocksByRange(req), - ), + InboundRequest::BlocksByRange(req) => { + let methods::OldBlocksByRangeRequest { + start_slot, + mut count, + step, + } = req; + // Still disconnect the peer if the request is naughty. 
+ if step == 0 { + self.peer_manager.handle_rpc_error( + &peer_id, + Protocol::BlocksByRange, + &RPCError::InvalidData( + "Blocks by range with 0 step parameter".into(), + ), + ConnectionDirection::Incoming, + ); + } + // return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856 + if step > 1 { + count = 1; + } + self.propagate_request( + peer_request_id, + peer_id, + Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }), + ); + } InboundRequest::BlocksByRoot(req) => { self.propagate_request(peer_request_id, peer_id, Request::BlocksByRoot(req)) } @@ -1316,7 +1335,13 @@ impl std::convert::From for OutboundRequest { fn from(req: Request) -> OutboundRequest { match req { Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), - Request::BlocksByRange(r) => OutboundRequest::BlocksByRange(r), + Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => { + OutboundRequest::BlocksByRange(methods::OldBlocksByRangeRequest { + start_slot, + count, + step: 1, + }) + } Request::Status(s) => OutboundRequest::Status(s), } } @@ -1333,9 +1358,9 @@ pub enum Response { /// A Status message. Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. - BlocksByRange(Option>>), + BlocksByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. 
- BlocksByRoot(Option>>), + BlocksByRoot(Option>>), } impl std::convert::From> for RPCCodedResponse { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 85c0ddd950..63d0816604 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -5,8 +5,8 @@ use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCo use crate::{error, metrics, Gossipsub}; use crate::{NetworkGlobals, PeerId}; use crate::{Subnet, SubnetDiscovery}; +use delay_map::HashSetDelay; use discv5::Enr; -use hashset_delay::HashSetDelay; use libp2p::identify::IdentifyInfo; use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult}; use rand::seq::SliceRandom; @@ -457,10 +457,7 @@ impl PeerManager { debug!(self.log, "Internal RPC Error"; "error" => %e, "peer_id" => %peer_id); return; } - RPCError::HandlerRejected => { - // Our fault. Do nothing - return; - } + RPCError::HandlerRejected => PeerAction::Fatal, RPCError::InvalidData(_) => { // Peer is not complying with the protocol. This is considered a malicious action PeerAction::Fatal @@ -484,7 +481,15 @@ impl PeerManager { // implement a new sync type which tracks these peers and prevents the sync // algorithms from requesting blocks from them (at least for a set period of // time, multiple failures would then lead to a ban). - PeerAction::Fatal + + match direction { + // If the blocks request was initiated by us, then we have no use of this + // peer and so we ban it. + ConnectionDirection::Outgoing => PeerAction::Fatal, + // If the blocks request was initiated by the peer, then we let the peer decide if + // it wants to continue talking to us, we do not ban the peer. 
+ ConnectionDirection::Incoming => return, + } } RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError, RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError, @@ -1015,20 +1020,17 @@ impl PeerManager { let mut removed_peer_index = None; for (index, (candidate_peer, info)) in peers_on_subnet.iter().enumerate() { // Ensure we don't remove too many outbound peers - if info.is_outbound_only() { - if self.target_outbound_peers() - < connected_outbound_peer_count + if info.is_outbound_only() + && self.target_outbound_peers() + >= connected_outbound_peer_count .saturating_sub(outbound_peers_pruned) - { - outbound_peers_pruned += 1; - } else { - // Restart the main loop with the outbound peer removed from - // the list. This will lower the peers per subnet count and - // potentially a new subnet may be chosen to remove peers. This - // can occur recursively until we have no peers left to choose - // from. - continue; - } + { + // Restart the main loop with the outbound peer removed from + // the list. This will lower the peers per subnet count and + // potentially a new subnet may be chosen to remove peers. This + // can occur recursively until we have no peers left to choose + // from. + continue; } // Check the sync committee @@ -1051,6 +1053,9 @@ impl PeerManager { } } + if info.is_outbound_only() { + outbound_peers_pruned += 1; + } // This peer is suitable to be pruned removed_peer_index = Some(index); break; @@ -1885,4 +1890,289 @@ mod tests { assert!(!connected_peers.contains(&peers[1])); assert!(!connected_peers.contains(&peers[2])); } + + /// This test is for reproducing the issue: + /// https://github.com/sigp/lighthouse/pull/3236#issue-1256432659 + /// + /// Whether the issue happens depends on `subnet_to_peer` (HashMap), since HashMap doesn't + /// guarantee a particular order of iteration. So we repeat the test case to try to reproduce + /// the issue. 
+ #[tokio::test] + async fn test_peer_manager_prune_based_on_subnet_count_repeat() { + for _ in 0..100 { + test_peer_manager_prune_based_on_subnet_count().await; + } + } + + /// Test the pruning logic to prioritize peers with the most subnets. This test specifies + /// the connection direction for the peers. + /// Either Peer 4 or 5 is expected to be removed in this test case. + /// + /// Create 8 peers. + /// Peer0 (out) : Subnet 1, Sync-committee-1 + /// Peer1 (out) : Subnet 1, Sync-committee-1 + /// Peer2 (out) : Subnet 2, Sync-committee-2 + /// Peer3 (out) : Subnet 2, Sync-committee-2 + /// Peer4 (out) : Subnet 3 + /// Peer5 (out) : Subnet 3 + /// Peer6 (in) : Subnet 4 + /// Peer7 (in) : Subnet 5 + async fn test_peer_manager_prune_based_on_subnet_count() { + let target = 7; + let mut peer_manager = build_peer_manager(target).await; + + // Create 8 peers to connect to. + let mut peers = Vec::new(); + for x in 0..8 { + let peer = PeerId::random(); + + // Have some of the peers be on a long-lived subnet + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + + match x { + 0 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(1, true).unwrap(); + syncnets.set(1, true).unwrap(); + } + 1 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(1, true).unwrap(); + syncnets.set(1, true).unwrap(); + } + 2 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(2, true).unwrap(); + syncnets.set(2, true).unwrap(); + } + 3 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(2, true).unwrap(); + syncnets.set(2, true).unwrap(); + } + 4 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + 
attnets.set(3, true).unwrap(); + } + 5 => { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(3, true).unwrap(); + } + 6 => { + peer_manager.inject_connect_ingoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(4, true).unwrap(); + } + 7 => { + peer_manager.inject_connect_ingoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + attnets.set(5, true).unwrap(); + } + _ => unreachable!(), + } + + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets, + }; + peer_manager + .network_globals + .peers + .write() + .peer_info_mut(&peer) + .unwrap() + .set_meta_data(MetaData::V2(metadata)); + let long_lived_subnets = peer_manager + .network_globals + .peers + .read() + .peer_info(&peer) + .unwrap() + .long_lived_subnets(); + println!("{},{}", x, peer); + for subnet in long_lived_subnets { + println!("Subnet: {:?}", subnet); + peer_manager + .network_globals + .peers + .write() + .add_subscription(&peer, subnet); + } + peers.push(peer); + } + + // Perform the heartbeat. + peer_manager.heartbeat(); + + // Tests that when we are over the target peer limit, after disconnecting an unhealthy peer, + // the number of connected peers updates and we will not remove too many peers. + assert_eq!( + peer_manager.network_globals.connected_or_dialing_peers(), + target + ); + + let connected_peers: std::collections::HashSet<_> = peer_manager + .network_globals + .peers + .read() + .connected_or_dialing_peers() + .cloned() + .collect(); + + // Either peer 4 or 5 should be removed. + // Check that we keep 6 and 7 peers, which we have few on a particular subnet. + assert!(connected_peers.contains(&peers[6])); + assert!(connected_peers.contains(&peers[7])); + } + + // Test properties PeerManager should have using randomly generated input. 
+ #[cfg(test)] + mod property_based_tests { + use crate::peer_manager::config::DEFAULT_TARGET_PEERS; + use crate::peer_manager::tests::build_peer_manager; + use crate::rpc::MetaData; + use libp2p::PeerId; + use quickcheck::{Arbitrary, Gen, TestResult}; + use quickcheck_macros::quickcheck; + use tokio::runtime::Runtime; + use types::Unsigned; + use types::{EthSpec, MainnetEthSpec as E}; + + #[derive(Clone, Debug)] + struct PeerCondition { + outgoing: bool, + attestation_net_bitfield: Vec, + sync_committee_net_bitfield: Vec, + score: f64, + gossipsub_score: f64, + } + + impl Arbitrary for PeerCondition { + fn arbitrary(g: &mut G) -> Self { + let attestation_net_bitfield = { + let len = ::SubnetBitfieldLength::to_usize(); + let mut bitfield = Vec::with_capacity(len); + for _ in 0..len { + bitfield.push(bool::arbitrary(g)); + } + bitfield + }; + + let sync_committee_net_bitfield = { + let len = ::SyncCommitteeSubnetCount::to_usize(); + let mut bitfield = Vec::with_capacity(len); + for _ in 0..len { + bitfield.push(bool::arbitrary(g)); + } + bitfield + }; + + PeerCondition { + outgoing: bool::arbitrary(g), + attestation_net_bitfield, + sync_committee_net_bitfield, + score: f64::arbitrary(g), + gossipsub_score: f64::arbitrary(g), + } + } + } + + #[quickcheck] + fn prune_excess_peers(peer_conditions: Vec) -> TestResult { + let target_peer_count = DEFAULT_TARGET_PEERS; + if peer_conditions.len() < target_peer_count { + return TestResult::discard(); + } + let rt = Runtime::new().unwrap(); + + rt.block_on(async move { + let mut peer_manager = build_peer_manager(target_peer_count).await; + + // Create peers based on the randomly generated conditions. 
+ for condition in &peer_conditions { + let peer = PeerId::random(); + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + + if condition.outgoing { + peer_manager.inject_connect_outgoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + } else { + peer_manager.inject_connect_ingoing( + &peer, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + } + + for (i, value) in condition.attestation_net_bitfield.iter().enumerate() { + attnets.set(i, *value).unwrap(); + } + + for (i, value) in condition.sync_committee_net_bitfield.iter().enumerate() { + syncnets.set(i, *value).unwrap(); + } + + let metadata = crate::rpc::MetaDataV2 { + seq_number: 0, + attnets, + syncnets, + }; + + let mut peer_db = peer_manager.network_globals.peers.write(); + let peer_info = peer_db.peer_info_mut(&peer).unwrap(); + peer_info.set_meta_data(MetaData::V2(metadata)); + peer_info.set_gossipsub_score(condition.gossipsub_score); + peer_info.add_to_score(condition.score); + + for subnet in peer_info.long_lived_subnets() { + peer_db.add_subscription(&peer, subnet); + } + } + + // Perform the heartbeat. + peer_manager.heartbeat(); + + TestResult::from_bool( + peer_manager.network_globals.connected_or_dialing_peers() + == target_peer_count.min(peer_conditions.len()), + ) + }) + } + } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 6273356b8f..555266d0e2 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -477,7 +477,7 @@ pub enum ConnectionDirection { } /// Connection Status of the peer. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub enum PeerConnectionStatus { /// The peer is connected. 
Connected { @@ -507,6 +507,7 @@ pub enum PeerConnectionStatus { since: Instant, }, /// The connection status has not been specified. + #[default] Unknown, } @@ -561,9 +562,3 @@ impl Serialize for PeerConnectionStatus { } } } - -impl Default for PeerConnectionStatus { - fn default() -> Self { - PeerConnectionStatus::Unknown - } -} diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index 3b67c442d7..accc0b60c5 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -223,6 +223,7 @@ impl RealScore { #[cfg(test)] pub fn set_gossipsub_score(&mut self, score: f64) { self.gossipsub_score = score; + self.update_state(); } /// Applies time-based logic such as decay rates to the score. diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 6bd4a96fb5..a46a05a8ce 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -137,6 +137,9 @@ impl Decoder for SSZSnappyInboundCodec { type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if self.protocol.message_name == Protocol::MetaData { + return Ok(Some(InboundRequest::MetaData(PhantomData))); + } let length = match handle_length(&mut self.inner, &mut self.len, src)? 
{ Some(len) => len, None => return Ok(None), @@ -461,7 +464,7 @@ fn handle_v1_request( GoodbyeReason::from_ssz_bytes(decoded_buffer)?, ))), Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( - BlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, + OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, @@ -493,7 +496,7 @@ fn handle_v2_request( ) -> Result>, RPCError> { match protocol { Protocol::BlocksByRange => Ok(Some(InboundRequest::BlocksByRange( - BlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, + OldBlocksByRangeRequest::from_ssz_bytes(decoded_buffer)?, ))), Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest { block_roots: VariableList::from_ssz_bytes(decoded_buffer)?, @@ -529,10 +532,10 @@ fn handle_v1_response( Protocol::Goodbye => Err(RPCError::InvalidData( "Goodbye RPC message has no valid response".to_string(), )), - Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Box::new( + Protocol::BlocksByRange => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping { @@ -569,31 +572,31 @@ fn handle_v2_response( })?; match protocol { Protocol::BlocksByRange => match fork_name { - ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Altair => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Base => 
Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Box::new( + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRange(Arc::new( SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( decoded_buffer, )?), )))), }, Protocol::BlocksByRoot => match fork_name { - ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Altair => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Altair(SignedBeaconBlockAltair::from_ssz_bytes( decoded_buffer, )?), )))), - ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Base => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), - ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + ForkName::Merge => Ok(Some(RPCResponse::BlocksByRoot(Arc::new( SignedBeaconBlock::Merge(SignedBeaconBlockMerge::from_ssz_bytes( decoded_buffer, )?), @@ -712,6 +715,20 @@ mod tests { } } + fn bbrange_request() -> OldBlocksByRangeRequest { + OldBlocksByRangeRequest { + start_slot: 0, + count: 10, + step: 1, + } + } + + fn bbroot_request() -> BlocksByRootRequest { + BlocksByRootRequest { + block_roots: VariableList::from(vec![Hash256::zero()]), + } + } + fn ping_message() -> Ping { Ping { data: 1 } } @@ -732,7 +749,7 @@ mod tests { } /// Encodes the given protocol response as bytes. - fn encode( + fn encode_response( protocol: Protocol, version: Version, message: RPCCodedResponse, @@ -779,7 +796,7 @@ mod tests { } /// Attempts to decode the given protocol bytes as an rpc response - fn decode( + fn decode_response( protocol: Protocol, version: Version, message: &mut BytesMut, @@ -795,21 +812,70 @@ mod tests { } /// Encodes the provided protocol message as bytes and tries to decode the encoding bytes. 
- fn encode_then_decode( + fn encode_then_decode_response( protocol: Protocol, version: Version, message: RPCCodedResponse, fork_name: ForkName, ) -> Result>, RPCError> { - let mut encoded = encode(protocol, version.clone(), message, fork_name)?; - decode(protocol, version, &mut encoded, fork_name) + let mut encoded = encode_response(protocol, version.clone(), message, fork_name)?; + decode_response(protocol, version, &mut encoded, fork_name) + } + + /// Verifies that requests we send are encoded in a way that we would correctly decode too. + fn encode_then_decode_request(req: OutboundRequest, fork_name: ForkName) { + let fork_context = Arc::new(fork_context(fork_name)); + let max_packet_size = max_rpc_size(&fork_context); + for protocol in req.supported_protocols() { + // Encode a request we send + let mut buf = BytesMut::new(); + let mut outbound_codec = SSZSnappyOutboundCodec::::new( + protocol.clone(), + max_packet_size, + fork_context.clone(), + ); + outbound_codec.encode(req.clone(), &mut buf).unwrap(); + + let mut inbound_codec = SSZSnappyInboundCodec::::new( + protocol.clone(), + max_packet_size, + fork_context.clone(), + ); + + let decoded = inbound_codec.decode(&mut buf).unwrap().unwrap_or_else(|| { + panic!( + "Should correctly decode the request {} over protocol {:?} and fork {}", + req, protocol, fork_name + ) + }); + match req.clone() { + OutboundRequest::Status(status) => { + assert_eq!(decoded, InboundRequest::Status(status)) + } + OutboundRequest::Goodbye(goodbye) => { + assert_eq!(decoded, InboundRequest::Goodbye(goodbye)) + } + OutboundRequest::BlocksByRange(bbrange) => { + assert_eq!(decoded, InboundRequest::BlocksByRange(bbrange)) + } + OutboundRequest::BlocksByRoot(bbroot) => { + assert_eq!(decoded, InboundRequest::BlocksByRoot(bbroot)) + } + OutboundRequest::Ping(ping) => { + assert_eq!(decoded, InboundRequest::Ping(ping)) + } + OutboundRequest::MetaData(metadata) => { + assert_eq!(decoded, InboundRequest::MetaData(metadata)) + } + } + } } // 
Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v1() { assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::Status, Version::V1, RPCCodedResponse::Success(RPCResponse::Status(status_message())), @@ -819,7 +885,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::Ping, Version::V1, RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), @@ -829,23 +895,23 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); assert!( matches!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap_err(), @@ -855,23 +921,23 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))) ); assert!( matches!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V1, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap_err(), @@ -881,7 +947,7 @@ mod tests { ); assert_eq!( 
- encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), @@ -891,7 +957,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), @@ -902,7 +968,7 @@ mod tests { // A MetaDataV2 still encodes as a MetaDataV1 since version is Version::V1 assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V1, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), @@ -917,7 +983,7 @@ mod tests { fn test_encode_then_decode_v2() { assert!( matches!( - encode_then_decode( + encode_then_decode_response( Protocol::Status, Version::V2, RPCCodedResponse::Success(RPCResponse::Status(status_message())), @@ -931,7 +997,7 @@ mod tests { assert!( matches!( - encode_then_decode( + encode_then_decode_response( Protocol::Ping, Version::V2, RPCCodedResponse::Success(RPCResponse::Pong(ping_message())), @@ -944,13 +1010,13 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); @@ -959,40 +1025,40 @@ mod tests { // This is useful for checking that we allow for blocks smaller than // the current_fork's rpc limit assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + 
Ok(Some(RPCResponse::BlocksByRange(Arc::new( empty_base_block() )))) ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new(altair_block())))) + Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block())))) ); let merge_block_small = merge_block_small(&fork_context(ForkName::Merge)); let merge_block_large = merge_block_large(&fork_context(ForkName::Merge)); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new( + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() ))), ForkName::Merge, ), - Ok(Some(RPCResponse::BlocksByRange(Box::new( + Ok(Some(RPCResponse::BlocksByRange(Arc::new( merge_block_small.clone() )))) ); @@ -1003,7 +1069,7 @@ mod tests { assert!( matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut encoded, @@ -1016,14 +1082,14 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))), ); @@ -1031,37 +1097,37 @@ mod tests { // This is useful for checking that we allow for blocks smaller than // the current_fork's rpc limit assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + 
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ), Ok(Some(RPCResponse::BlocksByRoot( - Box::new(empty_base_block()) + Arc::new(empty_base_block()) ))) ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ), - Ok(Some(RPCResponse::BlocksByRoot(Box::new(altair_block())))) + Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block())))) ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new( + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new( merge_block_small.clone() ))), ForkName::Merge, ), - Ok(Some(RPCResponse::BlocksByRoot(Box::new(merge_block_small)))) + Ok(Some(RPCResponse::BlocksByRoot(Arc::new(merge_block_small)))) ); let mut encoded = @@ -1070,7 +1136,7 @@ mod tests { assert!( matches!( - decode( + decode_response( Protocol::BlocksByRoot, Version::V2, &mut encoded, @@ -1084,7 +1150,7 @@ mod tests { // A MetaDataV1 still encodes as a MetaDataV2 since version is Version::V2 assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), @@ -1094,7 +1160,7 @@ mod tests { ); assert_eq!( - encode_then_decode( + encode_then_decode_response( Protocol::MetaData, Version::V2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())), @@ -1110,10 +1176,10 @@ mod tests { let fork_context = fork_context(ForkName::Altair); // Removing context bytes for v2 messages should error - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRange, Version::V2, - 
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Base, ) .unwrap(); @@ -1121,7 +1187,7 @@ mod tests { let _ = encoded_bytes.split_to(4); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut encoded_bytes, @@ -1131,10 +1197,10 @@ mod tests { RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _), )); - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Base, ) .unwrap(); @@ -1142,7 +1208,7 @@ mod tests { let _ = encoded_bytes.split_to(4); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut encoded_bytes, @@ -1153,10 +1219,10 @@ mod tests { )); // Trying to decode a base block with altair context bytes should give ssz decoding error - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRange, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRange(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); @@ -1167,7 +1233,7 @@ mod tests { wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes, @@ -1178,10 +1244,10 @@ mod tests { )); // Trying to decode an altair block with base context bytes should give ssz decoding error - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(altair_block()))), + 
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))), ForkName::Altair, ) .unwrap(); @@ -1191,7 +1257,7 @@ mod tests { wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes, @@ -1205,7 +1271,7 @@ mod tests { let mut encoded_bytes = BytesMut::new(); encoded_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); encoded_bytes.extend_from_slice( - &encode( + &encode_response( Protocol::MetaData, Version::V2, RPCCodedResponse::Success(RPCResponse::MetaData(metadata())), @@ -1214,7 +1280,7 @@ mod tests { .unwrap(), ); - assert!(decode( + assert!(decode_response( Protocol::MetaData, Version::V2, &mut encoded_bytes, @@ -1223,10 +1289,10 @@ mod tests { .is_err()); // Sending context bytes which do not correspond to any fork should return an error - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); @@ -1236,7 +1302,7 @@ mod tests { wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut wrong_fork_bytes, @@ -1247,10 +1313,10 @@ mod tests { )); // Sending bytes less than context bytes length should wait for more bytes by returning `Ok(None)` - let mut encoded_bytes = encode( + let mut encoded_bytes = encode_response( Protocol::BlocksByRoot, Version::V2, - RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Box::new(empty_base_block()))), + RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))), ForkName::Altair, ) .unwrap(); @@ -1258,7 +1324,7 @@ mod tests { let mut part = encoded_bytes.split_to(3); assert_eq!( - decode( 
+ decode_response( Protocol::BlocksByRange, Version::V2, &mut part, @@ -1268,6 +1334,23 @@ mod tests { ) } + #[test] + fn test_encode_then_decode_request() { + let requests: &[OutboundRequest] = &[ + OutboundRequest::Ping(ping_message()), + OutboundRequest::Status(status_message()), + OutboundRequest::Goodbye(GoodbyeReason::Fault), + OutboundRequest::BlocksByRange(bbrange_request()), + OutboundRequest::BlocksByRoot(bbroot_request()), + OutboundRequest::MetaData(PhantomData::), + ]; + for req in requests.iter() { + for fork_name in ForkName::list_all() { + encode_then_decode_request(req.clone(), fork_name); + } + } + } + /// Test a malicious snappy encoding for a V1 `Status` message where the attacker /// sends a valid message filled with a stream of useless padding before the actual message. #[test] @@ -1319,7 +1402,7 @@ mod tests { // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. assert!(matches!( - decode(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), + decode_response(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), RPCError::InvalidData(_) )); } @@ -1376,7 +1459,7 @@ mod tests { // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
assert!(matches!( - decode( + decode_response( Protocol::BlocksByRange, Version::V2, &mut dst, @@ -1421,7 +1504,7 @@ mod tests { dst.extend_from_slice(writer.get_ref()); assert!(matches!( - decode(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), + decode_response(Protocol::Status, Version::V1, &mut dst, ForkName::Base).unwrap_err(), RPCError::InvalidData(_) )); } diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index ac39e0cecc..9ac062adc4 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -40,6 +40,9 @@ const IO_ERROR_RETRIES: u8 = 3; /// Maximum time given to the handler to perform shutdown operations. const SHUTDOWN_TIMEOUT_SECS: u8 = 15; +/// Maximum number of simultaneous inbound substreams we keep for this peer. +const MAX_INBOUND_SUBSTREAMS: usize = 32; + /// Identifier of inbound and outbound substreams from the handler's perspective. #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct SubstreamId(usize); @@ -241,7 +244,7 @@ where // We now drive to completion communications already dialed/established while let Some((id, req)) = self.dial_queue.pop() { self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: req.protocol(), id, })); @@ -265,7 +268,7 @@ where self.dial_queue.push((id, req)); } _ => self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: req.protocol(), id, })), @@ -339,23 +342,32 @@ where // store requests that expect responses if expected_responses > 0 { - // Store the stream and tag the output. 
- let delay_key = self.inbound_substreams_delay.insert( - self.current_inbound_substream_id, - Duration::from_secs(RESPONSE_TIMEOUT), - ); - let awaiting_stream = InboundState::Idle(substream); - self.inbound_substreams.insert( - self.current_inbound_substream_id, - InboundInfo { - state: awaiting_stream, - pending_items: VecDeque::with_capacity(expected_responses as usize), - delay_key: Some(delay_key), - protocol: req.protocol(), - request_start_time: Instant::now(), - remaining_chunks: expected_responses, - }, - ); + if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS { + // Store the stream and tag the output. + let delay_key = self.inbound_substreams_delay.insert( + self.current_inbound_substream_id, + Duration::from_secs(RESPONSE_TIMEOUT), + ); + let awaiting_stream = InboundState::Idle(substream); + self.inbound_substreams.insert( + self.current_inbound_substream_id, + InboundInfo { + state: awaiting_stream, + pending_items: VecDeque::with_capacity(expected_responses as usize), + delay_key: Some(delay_key), + protocol: req.protocol(), + request_start_time: Instant::now(), + remaining_chunks: expected_responses, + }, + ); + } else { + self.events_out.push(Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: req.protocol(), + error: RPCError::HandlerRejected, + })); + return self.shutdown(None); + } } // If we received a goodbye, shutdown the connection. @@ -382,7 +394,7 @@ where // accept outbound connections only if the handler is not deactivated if matches!(self.state, HandlerState::Deactivated) { self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto, id, })); @@ -671,7 +683,7 @@ where { // if the request was still active, report back to cancel it self.events_out.push(Err(HandlerErr::Inbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: info.protocol, id: *id, })); @@ -803,7 +815,7 @@ where // the handler is deactivated. 
Close the stream entry.get_mut().state = OutboundSubstreamState::Closing(substream); self.events_out.push(Err(HandlerErr::Outbound { - error: RPCError::HandlerRejected, + error: RPCError::Disconnected, proto: entry.get().proto, id: entry.get().req_id, })) diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 1ac9c9b2c0..26d755a6e0 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -9,6 +9,7 @@ use ssz_types::{ VariableList, }; use std::ops::Deref; +use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -201,6 +202,16 @@ pub struct BlocksByRangeRequest { /// The number of blocks from the start slot. pub count: u64, +} + +/// Request a number of beacon block roots from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct OldBlocksByRangeRequest { + /// The starting slot to request blocks. + pub start_slot: u64, + + /// The number of blocks from the start slot. + pub count: u64, /// The step increment to receive blocks. /// @@ -227,10 +238,10 @@ pub enum RPCResponse { /// A response to a get BLOCKS_BY_RANGE request. A None response signifies the end of the /// batch. - BlocksByRange(Box>), + BlocksByRange(Arc>), /// A response to a get BLOCKS_BY_ROOT request. - BlocksByRoot(Box>), + BlocksByRoot(Arc>), /// A PONG response to a PING request. 
Pong(Ping), @@ -410,6 +421,12 @@ impl std::fmt::Display for GoodbyeReason { } impl std::fmt::Display for BlocksByRangeRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Start Slot: {}, Count: {}", self.start_slot, self.count) + } +} + +impl std::fmt::Display for OldBlocksByRangeRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 17201c6cf4..7d5acc4364 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -36,7 +36,7 @@ pub struct OutboundRequestContainer { pub enum OutboundRequest { Status(StatusMessage), Goodbye(GoodbyeReason), - BlocksByRange(BlocksByRangeRequest), + BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), Ping(Ping), MetaData(PhantomData), diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 1639d17941..81960214b1 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -279,8 +279,8 @@ impl ProtocolId { ::ssz_fixed_len(), ), Protocol::BlocksByRange => RpcLimits::new( - ::ssz_fixed_len(), - ::ssz_fixed_len(), + ::ssz_fixed_len(), + ::ssz_fixed_len(), ), Protocol::BlocksByRoot => { RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX) @@ -415,7 +415,7 @@ where pub enum InboundRequest { Status(StatusMessage), Goodbye(GoodbyeReason), - BlocksByRange(BlocksByRangeRequest), + BlocksByRange(OldBlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), Ping(Ping), MetaData(PhantomData), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 5e1b533c60..70b14c33de 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -188,29 +188,7 @@ impl RPCRateLimiter { request: &InboundRequest, ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); - let mut tokens = request.expected_responses().max(1); - - // Increase the rate limit for blocks by range requests with large step counts. - // We count to tokens as a quadratic increase with step size. - // Using (step_size/5)^2 + 1 as penalty factor allows step sizes of 1-4 to have no penalty - // but step sizes higher than this add a quadratic penalty. - // Penalty's go: - // Step size | Penalty Factor - // 1 | 1 - // 2 | 1 - // 3 | 1 - // 4 | 1 - // 5 | 2 - // 6 | 2 - // 7 | 2 - // 8 | 3 - // 9 | 4 - // 10 | 5 - - if let InboundRequest::BlocksByRange(bbr_req) = request { - let penalty_factor = (bbr_req.step as f64 / 5.0).powi(2) as u64 + 1; - tokens *= penalty_factor; - } + let tokens = request.expected_responses().max(1); let check = |limiter: &mut Limiter| limiter.allows(time_since_start, peer_id, tokens); diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index af2656a275..a01072f8e4 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -7,6 +7,7 @@ use snap::raw::{decompress_len, Decoder, Encoder}; use ssz::{Decode, Encode}; use std::boxed::Box; use std::io::{Error, ErrorKind}; +use std::sync::Arc; use types::{ Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, @@ -17,7 +18,7 @@ use types::{ #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - BeaconBlock(Box>), + BeaconBlock(Arc>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. 
AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. @@ -173,7 +174,7 @@ impl PubsubMessage { )) } }; - Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block))) + Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block))) } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 3dd7ad8470..825b1088b2 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -78,18 +78,13 @@ impl std::fmt::Display for GossipKind { } /// The known encoding types for gossipsub messages. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Default)] pub enum GossipEncoding { /// Messages are encoded with SSZSnappy. + #[default] SSZSnappy, } -impl Default for GossipEncoding { - fn default() -> Self { - GossipEncoding::SSZSnappy - } -} - impl GossipTopic { pub fn new(kind: GossipKind, encoding: GossipEncoding, fork_digest: [u8; 4]) -> Self { GossipTopic { diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 5895d32d5d..90052859bc 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -167,7 +167,6 @@ fn test_blocks_by_range_chunked_rpc() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { start_slot: 0, count: messages_to_send, - step: 0, }); let spec = E::default_spec(); @@ -175,15 +174,15 @@ fn test_blocks_by_range_chunked_rpc() { // BlocksByRange Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_base = 
Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_altair = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_merge_small = Response::BlocksByRange(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -307,13 +306,12 @@ fn test_blocks_by_range_over_limit() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { start_slot: 0, count: messages_to_send, - step: 0, }); // BlocksByRange Response let full_block = merge_block_large(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_large = Response::BlocksByRange(Some(Box::new(signed_full_block))); + let rpc_response_merge_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); let request_id = messages_to_send as usize; // build the sender future @@ -405,14 +403,13 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { start_slot: 0, count: messages_to_send, - step: 0, }); // BlocksByRange Response let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); - let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); + 
let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); // keep count of the number of messages received let mut messages_received: u64 = 0; @@ -537,14 +534,13 @@ fn test_blocks_by_range_single_empty_rpc() { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest { start_slot: 0, count: 10, - step: 0, }); // BlocksByRange Response let spec = E::default_spec(); let empty_block = BeaconBlock::empty(&spec); let empty_signed = SignedBeaconBlock::from_block(empty_block, Signature::empty()); - let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed))); + let rpc_response = Response::BlocksByRange(Some(Arc::new(empty_signed))); let messages_to_send = 1; @@ -664,15 +660,15 @@ fn test_blocks_by_root_chunked_rpc() { // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_base = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_base = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_altair = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); let full_block = merge_block_small(&common::fork_context(ForkName::Merge)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response_merge_small = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response_merge_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; @@ -807,7 +803,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { // BlocksByRoot Response let full_block = 
BeaconBlock::Base(BeaconBlockBase::::full(&spec)); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); - let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block))); + let rpc_response = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); // keep count of the number of messages received let mut messages_received = 0; diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 5aae8652e7..87c7650fb5 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -17,7 +17,6 @@ environment = { path = "../../lighthouse/environment" } beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } lighthouse_network = { path = "../lighthouse_network" } -hashset_delay = { path = "../../common/hashset_delay" } types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } @@ -44,3 +43,4 @@ if-addrs = "0.6.4" strum = "0.24.0" tokio-util = { version = "0.6.3", features = ["time"] } derivative = "2.2.0" +delay_map = "0.1.1" diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 3e25bd1442..e9a115904d 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -52,6 +52,7 @@ use lighthouse_network::{ use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use std::collections::VecDeque; +use std::future::Future; use std::pin::Pin; use std::sync::{Arc, Weak}; use std::task::Context; @@ -65,7 +66,7 @@ use types::{ SyncCommitteeMessage, SyncSubnetId, }; use work_reprocessing_queue::{ - spawn_reprocess_scheduler, QueuedAggregate, QueuedUnaggregate, ReadyWork, + spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, }; use worker::{Toolbox, Worker}; @@ -74,7 +75,7 @@ mod tests; mod work_reprocessing_queue; 
mod worker; -use crate::beacon_processor::work_reprocessing_queue::QueuedBlock; +use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock; pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage}; /// The maximum size of the channel for work events to the `BeaconProcessor`. @@ -89,7 +90,7 @@ pub const MAX_WORK_EVENT_QUEUE_LEN: usize = 16_384; const MAX_IDLE_QUEUE_LEN: usize = 16_384; /// The maximum size of the channel for re-processing work events. -const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 16_384; +const MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * MAX_WORK_EVENT_QUEUE_LEN / 4; /// The maximum number of queued `Attestation` objects that will be stored before we start dropping /// them. @@ -384,7 +385,7 @@ impl WorkEvent { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, seen_timestamp: Duration, ) -> Self { Self { @@ -488,7 +489,7 @@ impl WorkEvent { /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. pub fn rpc_beacon_block( - block: Box>, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, ) -> Self { @@ -498,6 +499,7 @@ impl WorkEvent { block, seen_timestamp, process_type, + should_process: true, }, } } @@ -505,7 +507,7 @@ impl WorkEvent { /// Create a new work event to import `blocks` as a beacon chain segment. 
pub fn chain_segment( process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>>, ) -> Self { Self { drop_during_sync: false, @@ -562,7 +564,7 @@ impl WorkEvent { impl std::convert::From> for WorkEvent { fn from(ready_work: ReadyWork) -> Self { match ready_work { - ReadyWork::Block(QueuedBlock { + ReadyWork::Block(QueuedGossipBlock { peer_id, block, seen_timestamp, @@ -574,6 +576,20 @@ impl std::convert::From> for WorkEvent { seen_timestamp, }, }, + ReadyWork::RpcBlock(QueuedRpcBlock { + block, + seen_timestamp, + process_type, + should_process, + }) => Self { + drop_during_sync: false, + work: Work::RpcBlock { + block, + seen_timestamp, + process_type, + should_process, + }, + }, ReadyWork::Unaggregate(QueuedUnaggregate { peer_id, message_id, @@ -652,7 +668,7 @@ pub enum Work { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, seen_timestamp: Duration, }, DelayedImportBlock { @@ -689,13 +705,14 @@ pub enum Work { seen_timestamp: Duration, }, RpcBlock { - block: Box>, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, + should_process: bool, }, ChainSegment { process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>>, }, Status { peer_id: PeerId, @@ -869,6 +886,7 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN); let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); + let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN); let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN); let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN); @@ -1110,6 +1128,9 @@ impl BeaconProcessor { // Check exits last since our validators don't get rewards from them. } else if let Some(item) = gossip_voluntary_exit_queue.pop() { self.spawn_worker(item, toolbox); + // Handle backfill sync chain segments. 
+ } else if let Some(item) = backfill_chain_segment.pop() { + self.spawn_worker(item, toolbox); // This statement should always be the final else statement. } else { // Let the journal know that a worker is freed and there's nothing else @@ -1195,9 +1216,15 @@ impl BeaconProcessor { sync_contribution_queue.push(work) } Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log), - Work::ChainSegment { .. } => { - chain_segment_queue.push(work, work_id, &self.log) - } + Work::ChainSegment { ref process_id, .. } => match process_id { + ChainSegmentProcessId::RangeBatchId { .. } + | ChainSegmentProcessId::ParentLookup { .. } => { + chain_segment_queue.push(work, work_id, &self.log) + } + ChainSegmentProcessId::BackSyncBatchId { .. } => { + backfill_chain_segment.push(work, work_id, &self.log) + } + }, Work::Status { .. } => status_queue.push(work, work_id, &self.log), Work::BlocksByRangeRequest { .. } => { bbrange_queue.push(work, work_id, &self.log) @@ -1247,6 +1274,10 @@ impl BeaconProcessor { &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL, chain_segment_queue.len() as i64, ); + metrics::set_gauge( + &metrics::BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL, + backfill_chain_segment.len() as i64, + ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_EXIT_QUEUE_TOTAL, gossip_voluntary_exit_queue.len() as i64, @@ -1291,15 +1322,6 @@ impl BeaconProcessor { let idle_tx = toolbox.idle_tx; let work_reprocessing_tx = toolbox.work_reprocessing_tx; - // Wrap the `idle_tx` in a struct that will fire the idle message whenever it is dropped. - // - // This helps ensure that the worker is always freed in the case of an early exit or panic. - // As such, this instantiation should happen as early in the function as possible. 
- let send_idle_on_drop = SendOnDrop { - tx: idle_tx, - log: self.log.clone(), - }; - let work_id = work.str_id(); let worker_timer = metrics::start_timer_vec(&metrics::BEACON_PROCESSOR_WORKER_TIME, &[work_id]); @@ -1309,6 +1331,16 @@ impl BeaconProcessor { &[work.str_id()], ); + // Wrap the `idle_tx` in a struct that will fire the idle message whenever it is dropped. + // + // This helps ensure that the worker is always freed in the case of an early exit or panic. + // As such, this instantiation should happen as early in the function as possible. + let send_idle_on_drop = SendOnDrop { + tx: idle_tx, + _worker_timer: worker_timer, + log: self.log.clone(), + }; + let worker_id = self.current_workers; self.current_workers = self.current_workers.saturating_add(1); @@ -1322,7 +1354,6 @@ impl BeaconProcessor { return; }; - let log = self.log.clone(); let executor = self.executor.clone(); let worker = Worker { @@ -1341,252 +1372,310 @@ impl BeaconProcessor { "worker" => worker_id, ); - let sub_executor = executor.clone(); - executor.spawn_blocking( - move || { - let _worker_timer = worker_timer; + let task_spawner = TaskSpawner { + executor: executor.clone(), + send_idle_on_drop, + }; - match work { - /* - * Individual unaggregated attestation verification. - */ - Work::GossipAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - Some(work_reprocessing_tx), - seen_timestamp, - ), - /* - * Batched unaggregated attestation verification. - */ - Work::GossipAttestationBatch { packages } => worker - .process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)), - /* - * Individual aggregated attestation verification. 
- */ - Work::GossipAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - Some(work_reprocessing_tx), - seen_timestamp, - ), - /* - * Batched aggregated attestation verification. - */ - Work::GossipAggregateBatch { packages } => { - worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) - } - /* - * Verification for beacon blocks received on gossip. - */ - Work::GossipBlock { + let sub_executor = executor; + match work { + /* + * Individual unaggregated attestation verification. + */ + Work::GossipAttestation { + message_id, + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + Some(work_reprocessing_tx), + seen_timestamp, + ) + }), + /* + * Batched unaggregated attestation verification. + */ + Work::GossipAttestationBatch { packages } => task_spawner.spawn_blocking(|| { + worker.process_gossip_attestation_batch(packages, Some(work_reprocessing_tx)) + }), + /* + * Individual aggregated attestation verification. + */ + Work::GossipAggregate { + message_id, + peer_id, + aggregate, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_aggregate( + message_id, + peer_id, + aggregate, + Some(work_reprocessing_tx), + seen_timestamp, + ) + }), + /* + * Batched aggregated attestation verification. + */ + Work::GossipAggregateBatch { packages } => task_spawner.spawn_blocking(|| { + worker.process_gossip_aggregate_batch(packages, Some(work_reprocessing_tx)) + }), + /* + * Verification for beacon blocks received on gossip. 
+ */ + Work::GossipBlock { + message_id, + peer_id, + peer_client, + block, + seen_timestamp, + } => task_spawner.spawn_async(async move { + worker + .process_gossip_block( message_id, peer_id, peer_client, block, - seen_timestamp, - } => worker.process_gossip_block( - message_id, - peer_id, - peer_client, - *block, - work_reprocessing_tx.clone(), + work_reprocessing_tx, duplicate_cache, seen_timestamp, - ), - /* - * Import for blocks that we received earlier than their intended slot. - */ - Work::DelayedImportBlock { - peer_id, - block, - seen_timestamp, - } => worker.process_gossip_verified_block( - peer_id, - *block, - work_reprocessing_tx, - seen_timestamp, - ), - /* - * Voluntary exits received on gossip. - */ - Work::GossipVoluntaryExit { - message_id, - peer_id, - voluntary_exit, - } => worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit), - /* - * Proposer slashings received on gossip. - */ - Work::GossipProposerSlashing { - message_id, - peer_id, - proposer_slashing, - } => worker.process_gossip_proposer_slashing( - message_id, - peer_id, - *proposer_slashing, - ), - /* - * Attester slashings received on gossip. - */ - Work::GossipAttesterSlashing { - message_id, - peer_id, - attester_slashing, - } => worker.process_gossip_attester_slashing( - message_id, - peer_id, - *attester_slashing, - ), - /* - * Sync committee message verification. - */ - Work::GossipSyncSignature { - message_id, - peer_id, - sync_signature, - subnet_id, - seen_timestamp, - } => worker.process_gossip_sync_committee_signature( - message_id, - peer_id, - *sync_signature, - subnet_id, - seen_timestamp, - ), - /* - * Syn contribution verification. - */ - Work::GossipSyncContribution { - message_id, - peer_id, - sync_contribution, - seen_timestamp, - } => worker.process_sync_committee_contribution( - message_id, - peer_id, - *sync_contribution, - seen_timestamp, - ), - /* - * Verification for beacon blocks received during syncing via RPC. 
- */ - Work::RpcBlock { - block, - seen_timestamp, - process_type, - } => { - worker.process_rpc_block( - *block, - seen_timestamp, - process_type, - work_reprocessing_tx.clone(), - duplicate_cache, - ); - } - /* - * Verification for a chain segment (multiple blocks). - */ - Work::ChainSegment { process_id, blocks } => { - worker.process_chain_segment(process_id, blocks) - } - /* - * Processing of Status Messages. - */ - Work::Status { peer_id, message } => worker.process_status(peer_id, message), - /* - * Processing of range syncing requests from other peers. - */ - Work::BlocksByRangeRequest { - peer_id, - request_id, - request, - } => { - return worker.handle_blocks_by_range_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - } - /* - * Processing of blocks by roots requests from other peers. - */ - Work::BlocksByRootsRequest { - peer_id, - request_id, - request, - } => { - return worker.handle_blocks_by_root_request( - sub_executor, - send_idle_on_drop, - peer_id, - request_id, - request, - ) - } - Work::UnknownBlockAttestation { - message_id, - peer_id, - attestation, - subnet_id, - should_import, - seen_timestamp, - } => worker.process_gossip_attestation( - message_id, - peer_id, - attestation, - subnet_id, - should_import, - None, // Do not allow this attestation to be re-processed beyond this point. - seen_timestamp, - ), - Work::UnknownBlockAggregate { - message_id, - peer_id, - aggregate, - seen_timestamp, - } => worker.process_gossip_aggregate( - message_id, - peer_id, - aggregate, - None, - seen_timestamp, - ), - }; + ) + .await + }), + /* + * Import for blocks that we received earlier than their intended slot. + */ + Work::DelayedImportBlock { + peer_id, + block, + seen_timestamp, + } => task_spawner.spawn_async(worker.process_gossip_verified_block( + peer_id, + *block, + work_reprocessing_tx, + seen_timestamp, + )), + /* + * Voluntary exits received on gossip. 
+ */ + Work::GossipVoluntaryExit { + message_id, + peer_id, + voluntary_exit, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_voluntary_exit(message_id, peer_id, *voluntary_exit) + }), + /* + * Proposer slashings received on gossip. + */ + Work::GossipProposerSlashing { + message_id, + peer_id, + proposer_slashing, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_proposer_slashing(message_id, peer_id, *proposer_slashing) + }), + /* + * Attester slashings received on gossip. + */ + Work::GossipAttesterSlashing { + message_id, + peer_id, + attester_slashing, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attester_slashing(message_id, peer_id, *attester_slashing) + }), + /* + * Sync committee message verification. + */ + Work::GossipSyncSignature { + message_id, + peer_id, + sync_signature, + subnet_id, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_sync_committee_signature( + message_id, + peer_id, + *sync_signature, + subnet_id, + seen_timestamp, + ) + }), + /* + * Syn contribution verification. + */ + Work::GossipSyncContribution { + message_id, + peer_id, + sync_contribution, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_sync_committee_contribution( + message_id, + peer_id, + *sync_contribution, + seen_timestamp, + ) + }), + /* + * Verification for beacon blocks received during syncing via RPC. + */ + Work::RpcBlock { + block, + seen_timestamp, + process_type, + should_process, + } => task_spawner.spawn_async(worker.process_rpc_block( + block, + seen_timestamp, + process_type, + work_reprocessing_tx, + duplicate_cache, + should_process, + )), + /* + * Verification for a chain segment (multiple blocks). + */ + Work::ChainSegment { process_id, blocks } => task_spawner + .spawn_async(async move { worker.process_chain_segment(process_id, blocks).await }), + /* + * Processing of Status Messages. 
+ */ + Work::Status { peer_id, message } => { + task_spawner.spawn_blocking(move || worker.process_status(peer_id, message)) + } + /* + * Processing of range syncing requests from other peers. + */ + Work::BlocksByRangeRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { + worker.handle_blocks_by_range_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }), + /* + * Processing of blocks by roots requests from other peers. + */ + Work::BlocksByRootsRequest { + peer_id, + request_id, + request, + } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| { + worker.handle_blocks_by_root_request( + sub_executor, + send_idle_on_drop, + peer_id, + request_id, + request, + ) + }), + Work::UnknownBlockAttestation { + message_id, + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_attestation( + message_id, + peer_id, + attestation, + subnet_id, + should_import, + None, // Do not allow this attestation to be re-processed beyond this point. + seen_timestamp, + ) + }), + Work::UnknownBlockAggregate { + message_id, + peer_id, + aggregate, + seen_timestamp, + } => task_spawner.spawn_blocking(move || { + worker.process_gossip_aggregate( + message_id, + peer_id, + aggregate, + None, + seen_timestamp, + ) + }), + }; + } +} - trace!( - log, - "Beacon processor worker done"; - "work" => work_id, - "worker" => worker_id, - ); +/// Spawns tasks that are either: +/// +/// - Blocking (i.e. intensive methods that shouldn't run on the core `tokio` executor) +/// - Async (i.e. `async` methods) +/// +/// Takes a `SendOnDrop` and ensures it is dropped after the task completes. This frees the beacon +/// processor worker so a new task can be started. 
+struct TaskSpawner { + executor: TaskExecutor, + send_idle_on_drop: SendOnDrop, +} - // This explicit `drop` is used to remind the programmer that this variable must - // not be dropped until the worker is complete. Dropping it early will cause the - // worker to be marked as "free" and cause an over-spawning of workers. - drop(send_idle_on_drop); +impl TaskSpawner { + /// Spawn an async task, dropping the `SendOnDrop` after the task has completed. + fn spawn_async(self, task: impl Future + Send + 'static) { + self.executor.spawn( + async { + task.await; + drop(self.send_idle_on_drop) }, WORKER_TASK_NAME, - ); + ) + } + + /// Spawn a blocking task, dropping the `SendOnDrop` after the task has completed. + fn spawn_blocking(self, task: F) + where + F: FnOnce() + Send + 'static, + { + self.executor.spawn_blocking( + || { + task(); + drop(self.send_idle_on_drop) + }, + WORKER_TASK_NAME, + ) + } + + /// Spawn a blocking task, passing the `SendOnDrop` into the task. + /// + /// ## Notes + /// + /// Users must ensure the `SendOnDrop` is dropped at the appropriate time! + pub fn spawn_blocking_with_manual_send_idle(self, task: F) + where + F: FnOnce(SendOnDrop) + Send + 'static, + { + self.executor.spawn_blocking( + || { + task(self.send_idle_on_drop); + }, + WORKER_TASK_NAME, + ) } } @@ -1602,6 +1691,8 @@ impl BeaconProcessor { /// https://doc.rust-lang.org/std/ops/trait.Drop.html#panics pub struct SendOnDrop { tx: mpsc::Sender<()>, + // The field is unused, but it's here to ensure the timer is dropped once the task has finished. + _worker_timer: Option, log: Logger, } diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 1c9d323576..05854ac1e2 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -1,14 +1,15 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. 
#![cfg(test)] -use crate::beacon_processor::work_reprocessing_queue::QUEUED_ATTESTATION_DELAY; +use crate::beacon_processor::work_reprocessing_queue::{ + QUEUED_ATTESTATION_DELAY, QUEUED_RPC_BLOCK_DELAY, +}; use crate::beacon_processor::*; use crate::{service::NetworkMessage, sync::SyncMessage}; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; -use environment::{null_logger, Environment, EnvironmentBuilder}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, rpc::methods::{MetaData, MetaDataV2}, @@ -20,7 +21,6 @@ use std::cmp; use std::iter::Iterator; use std::sync::Arc; use std::time::Duration; -use tokio::runtime::Handle; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock, @@ -45,7 +45,7 @@ const STANDARD_TIMEOUT: Duration = Duration::from_secs(10); /// Provides utilities for testing the `BeaconProcessor`. struct TestRig { chain: Arc>, - next_block: SignedBeaconBlock, + next_block: Arc>, attestations: Vec<(Attestation, SubnetId)>, next_block_attestations: Vec<(Attestation, SubnetId)>, next_block_aggregate_attestations: Vec>, @@ -56,7 +56,8 @@ struct TestRig { work_journal_rx: mpsc::Receiver<&'static str>, _network_rx: mpsc::UnboundedReceiver>, _sync_rx: mpsc::UnboundedReceiver>, - environment: Option>, + duplicate_cache: DuplicateCache, + _harness: BeaconChainHarness, } /// This custom drop implementation ensures that we shut down the tokio runtime gracefully. Without @@ -65,12 +66,11 @@ impl Drop for TestRig { fn drop(&mut self) { // Causes the beacon processor to shutdown. 
self.beacon_processor_tx = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN).0; - self.environment.take().unwrap().shutdown_on_idle(); } } impl TestRig { - pub fn new(chain_length: u64) -> Self { + pub async fn new(chain_length: u64) -> Self { // This allows for testing voluntary exits without building out a massive chain. let mut spec = E::default_spec(); spec.shard_committee_period = 2; @@ -84,16 +84,18 @@ impl TestRig { harness.advance_slot(); for _ in 0..chain_length { - harness.extend_chain( - 1, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.advance_slot(); } - let head = harness.chain.head().unwrap(); + let head = harness.chain.head_snapshot(); assert_eq!( harness.chain.slot().unwrap(), @@ -101,8 +103,9 @@ impl TestRig { "precondition: current slot is one after head" ); - let (next_block, next_state) = - harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let (next_block, next_state) = harness + .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) + .await; let head_state_root = head.beacon_state_root(); let attestations = harness @@ -155,11 +158,11 @@ impl TestRig { let proposer_slashing = harness.make_proposer_slashing(2); let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap()); - let chain = harness.chain; + let chain = harness.chain.clone(); let (network_tx, _network_rx) = mpsc::unbounded_channel(); - let log = null_logger().unwrap(); + let log = harness.logger().clone(); let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(MAX_WORK_EVENT_QUEUE_LEN); let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); @@ -181,18 +184,11 @@ impl TestRig { &log, )); - let mut environment = EnvironmentBuilder::mainnet() - .null_logger() - .unwrap() - .multi_threaded_tokio_runtime() - .unwrap() - .build() - .unwrap(); - - let executor = 
environment.core_context().executor; + let executor = harness.runtime.task_executor.clone(); let (work_journal_tx, work_journal_rx) = mpsc::channel(16_364); + let duplicate_cache = DuplicateCache::default(); BeaconProcessor { beacon_chain: Arc::downgrade(&chain), network_tx, @@ -201,14 +197,14 @@ impl TestRig { executor, max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, - importing_blocks: Default::default(), + importing_blocks: duplicate_cache.clone(), log: log.clone(), } .spawn_manager(beacon_processor_rx, Some(work_journal_tx)); Self { chain, - next_block, + next_block: Arc::new(next_block), attestations, next_block_attestations, next_block_aggregate_attestations, @@ -219,12 +215,17 @@ impl TestRig { work_journal_rx, _network_rx, _sync_rx, - environment: Some(environment), + duplicate_cache, + _harness: harness, } } + pub async fn recompute_head(&self) { + self.chain.recompute_head_at_current_slot().await + } + pub fn head_root(&self) -> Hash256 { - self.chain.head().unwrap().beacon_block_root + self.chain.head_snapshot().beacon_block_root } pub fn enqueue_gossip_block(&self) { @@ -233,7 +234,7 @@ impl TestRig { junk_message_id(), junk_peer_id(), Client::default(), - Box::new(self.next_block.clone()), + self.next_block.clone(), Duration::from_secs(0), )) .unwrap(); @@ -241,7 +242,7 @@ impl TestRig { pub fn enqueue_rpc_block(&self) { let event = WorkEvent::rpc_beacon_block( - Box::new(self.next_block.clone()), + self.next_block.clone(), std::time::Duration::default(), BlockProcessType::ParentLookup { chain_hash: Hash256::random(), @@ -250,6 +251,15 @@ impl TestRig { self.beacon_processor_tx.try_send(event).unwrap(); } + pub fn enqueue_single_lookup_rpc_block(&self) { + let event = WorkEvent::rpc_beacon_block( + self.next_block.clone(), + std::time::Duration::default(), + BlockProcessType::SingleBlock { id: 1 }, + ); + self.beacon_processor_tx.try_send(event).unwrap(); + } + pub fn enqueue_unaggregated_attestation(&self) { let (attestation, subnet_id) 
= self.attestations.first().unwrap().clone(); self.beacon_processor_tx @@ -324,28 +334,16 @@ impl TestRig { .unwrap(); } - fn handle(&mut self) -> Handle { - self.environment - .as_mut() - .unwrap() - .core_context() - .executor - .handle() - .unwrap() - } - /// Assert that the `BeaconProcessor` doesn't produce any events in the given `duration`. - pub fn assert_no_events_for(&mut self, duration: Duration) { - self.handle().block_on(async { - tokio::select! { - _ = tokio::time::sleep(duration) => (), - event = self.work_journal_rx.recv() => panic!( - "received {:?} within {:?} when expecting no events", - event, - duration - ), - } - }) + pub async fn assert_no_events_for(&mut self, duration: Duration) { + tokio::select! { + _ = tokio::time::sleep(duration) => (), + event = self.work_journal_rx.recv() => panic!( + "received {:?} within {:?} when expecting no events", + event, + duration + ), + } } /// Checks that the `BeaconProcessor` event journal contains the `expected` events in the given @@ -354,57 +352,54 @@ impl TestRig { /// /// Given the described logic, `expected` must not contain `WORKER_FREED` or `NOTHING_TO_DO` /// events. - pub fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { + pub async fn assert_event_journal_contains_ordered(&mut self, expected: &[&str]) { assert!(expected .iter() .all(|ev| ev != &WORKER_FREED && ev != &NOTHING_TO_DO)); - let (events, worker_freed_remaining) = self.handle().block_on(async { - let mut events = Vec::with_capacity(expected.len()); - let mut worker_freed_remaining = expected.len(); + let mut events = Vec::with_capacity(expected.len()); + let mut worker_freed_remaining = expected.len(); - let drain_future = async { - loop { - match self.work_journal_rx.recv().await { - Some(event) if event == WORKER_FREED => { - worker_freed_remaining -= 1; - if worker_freed_remaining == 0 { - // Break when all expected events are finished. 
- break; - } + let drain_future = async { + loop { + match self.work_journal_rx.recv().await { + Some(event) if event == WORKER_FREED => { + worker_freed_remaining -= 1; + if worker_freed_remaining == 0 { + // Break when all expected events are finished. + break; } - Some(event) if event == NOTHING_TO_DO => { - // Ignore these. - } - Some(event) => { - events.push(event); - } - None => break, } + Some(event) if event == NOTHING_TO_DO => { + // Ignore these. + } + Some(event) => { + events.push(event); + } + None => break, } - }; - - // Drain the expected number of events from the channel, or time out and give up. - tokio::select! { - _ = tokio::time::sleep(STANDARD_TIMEOUT) => panic!( - "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?} waiting for {} `WORKER_FREED` events.", - STANDARD_TIMEOUT, - expected, - events, - worker_freed_remaining, - ), - _ = drain_future => {}, } + }; - (events, worker_freed_remaining) - }); + // Drain the expected number of events from the channel, or time out and give up. + tokio::select! { + _ = tokio::time::sleep(STANDARD_TIMEOUT) => panic!( + "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?} waiting for {} `WORKER_FREED` events.", + STANDARD_TIMEOUT, + expected, + events, + worker_freed_remaining, + ), + _ = drain_future => {}, + } assert_eq!(events, expected); assert_eq!(worker_freed_remaining, 0); } - pub fn assert_event_journal(&mut self, expected: &[&str]) { - self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT); + pub async fn assert_event_journal(&mut self, expected: &[&str]) { + self.assert_event_journal_with_timeout(expected, STANDARD_TIMEOUT) + .await } /// Assert that the `BeaconProcessor` event journal is as `expected`. @@ -413,34 +408,34 @@ impl TestRig { /// /// We won't attempt to listen for any more than `expected.len()` events. As such, it makes sense /// to use the `NOTHING_TO_DO` event to ensure that execution has completed. 
- pub fn assert_event_journal_with_timeout(&mut self, expected: &[&str], timeout: Duration) { - let events = self.handle().block_on(async { - let mut events = Vec::with_capacity(expected.len()); + pub async fn assert_event_journal_with_timeout( + &mut self, + expected: &[&str], + timeout: Duration, + ) { + let mut events = Vec::with_capacity(expected.len()); - let drain_future = async { - while let Some(event) = self.work_journal_rx.recv().await { - events.push(event); + let drain_future = async { + while let Some(event) = self.work_journal_rx.recv().await { + events.push(event); - // Break as soon as we collect the desired number of events. - if events.len() >= expected.len() { - break; - } + // Break as soon as we collect the desired number of events. + if events.len() >= expected.len() { + break; } - }; - - // Drain the expected number of events from the channel, or time out and give up. - tokio::select! { - _ = tokio::time::sleep(timeout) => panic!( - "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?}", - timeout, - expected, - events - ), - _ = drain_future => {}, } + }; - events - }); + // Drain the expected number of events from the channel, or time out and give up. + tokio::select! { + _ = tokio::time::sleep(timeout) => panic!( + "Timeout ({:?}) expired waiting for events. Expected {:?} but got {:?}", + timeout, + expected, + events + ), + _ = drain_future => {}, + } assert_eq!(events, expected); } @@ -455,9 +450,9 @@ fn junk_message_id() -> MessageId { } /// Blocks that arrive early should be queued for later processing. 
-#[test] -fn import_gossip_block_acceptably_early() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_acceptably_early() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let slot_start = rig .chain @@ -477,7 +472,8 @@ fn import_gossip_block_acceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for @@ -492,7 +488,8 @@ fn import_gossip_block_acceptably_early() { "block not yet imported" ); - rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[DELAYED_IMPORT_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.head_root(), @@ -502,9 +499,9 @@ fn import_gossip_block_acceptably_early() { } /// Blocks that are *too* early shouldn't get into the delay queue. -#[test] -fn import_gossip_block_unacceptably_early() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_unacceptably_early() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let slot_start = rig .chain @@ -524,11 +521,12 @@ fn import_gossip_block_unacceptably_early() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; // Waiting for 5 seconds is a bit arbitrary, however it *should* be long enough to ensure the // block isn't imported. 
- rig.assert_no_events_for(Duration::from_secs(5)); + rig.assert_no_events_for(Duration::from_secs(5)).await; assert!( rig.head_root() != rig.next_block.canonical_root(), @@ -537,9 +535,9 @@ fn import_gossip_block_unacceptably_early() { } /// Blocks that arrive on-time should be processed normally. -#[test] -fn import_gossip_block_at_current_slot() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_block_at_current_slot() { + let mut rig = TestRig::new(SMALL_CHAIN).await; assert_eq!( rig.chain.slot().unwrap(), @@ -549,7 +547,8 @@ fn import_gossip_block_at_current_slot() { rig.enqueue_gossip_block(); - rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.head_root(), @@ -559,15 +558,16 @@ fn import_gossip_block_at_current_slot() { } /// Ensure a valid attestation can be imported. -#[test] -fn import_gossip_attestation() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn import_gossip_attestation() { + let mut rig = TestRig::new(SMALL_CHAIN).await; let initial_attns = rig.chain.naive_aggregation_pool.read().num_items(); rig.enqueue_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -583,8 +583,8 @@ enum BlockImportMethod { /// Ensure that attestations that reference an unknown block get properly re-queued and /// re-processed upon importing the block. -fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { - let mut rig = TestRig::new(SMALL_CHAIN); +async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. 
@@ -592,7 +592,8 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -613,11 +614,12 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]); + rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_ATTESTATION]) + .await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. - rig.chain.fork_choice().unwrap(); + rig.recompute_head().await; assert_eq!( rig.head_root(), @@ -632,20 +634,20 @@ fn attestation_to_unknown_block_processed(import_method: BlockImportMethod) { ); } -#[test] -fn attestation_to_unknown_block_processed_after_gossip_block() { - attestation_to_unknown_block_processed(BlockImportMethod::Gossip) +#[tokio::test] +async fn attestation_to_unknown_block_processed_after_gossip_block() { + attestation_to_unknown_block_processed(BlockImportMethod::Gossip).await } -#[test] -fn attestation_to_unknown_block_processed_after_rpc_block() { - attestation_to_unknown_block_processed(BlockImportMethod::Rpc) +#[tokio::test] +async fn attestation_to_unknown_block_processed_after_rpc_block() { + attestation_to_unknown_block_processed(BlockImportMethod::Rpc).await } /// Ensure that attestations that reference an unknown block get properly re-queued and /// re-processed upon importing the block. 
-fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { - let mut rig = TestRig::new(SMALL_CHAIN); +async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Empty the op pool. rig.chain @@ -659,7 +661,8 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_attestations(), @@ -680,11 +683,12 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { } }; - rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]); + rig.assert_event_journal_contains_ordered(&[block_event, UNKNOWN_BLOCK_AGGREGATE]) + .await; // Run fork choice, since it isn't run when processing an RPC block. At runtime it is the // responsibility of the sync manager to do this. 
- rig.chain.fork_choice().unwrap(); + rig.recompute_head().await; assert_eq!( rig.head_root(), @@ -699,21 +703,21 @@ fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod) { ); } -#[test] -fn aggregate_attestation_to_unknown_block_processed_after_gossip_block() { - aggregate_attestation_to_unknown_block(BlockImportMethod::Gossip) +#[tokio::test] +async fn aggregate_attestation_to_unknown_block_processed_after_gossip_block() { + aggregate_attestation_to_unknown_block(BlockImportMethod::Gossip).await } -#[test] -fn aggregate_attestation_to_unknown_block_processed_after_rpc_block() { - aggregate_attestation_to_unknown_block(BlockImportMethod::Rpc) +#[tokio::test] +async fn aggregate_attestation_to_unknown_block_processed_after_rpc_block() { + aggregate_attestation_to_unknown_block(BlockImportMethod::Rpc).await } /// Ensure that attestations that reference an unknown block get properly re-queued and re-processed /// when the block is not seen. -#[test] -fn requeue_unknown_block_gossip_attestation_without_import() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn requeue_unknown_block_gossip_attestation_without_import() { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. 
@@ -721,7 +725,8 @@ fn requeue_unknown_block_gossip_attestation_without_import() { rig.enqueue_next_block_unaggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTATION, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -734,7 +739,8 @@ fn requeue_unknown_block_gossip_attestation_without_import() { rig.assert_event_journal_with_timeout( &[UNKNOWN_BLOCK_ATTESTATION, WORKER_FREED, NOTHING_TO_DO], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, - ); + ) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -745,9 +751,9 @@ fn requeue_unknown_block_gossip_attestation_without_import() { /// Ensure that aggregate that reference an unknown block get properly re-queued and re-processed /// when the block is not seen. -#[test] -fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { - let mut rig = TestRig::new(SMALL_CHAIN); +#[tokio::test] +async fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { + let mut rig = TestRig::new(SMALL_CHAIN).await; // Send the attestation but not the block, and check that it was not imported. 
@@ -755,7 +761,8 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.enqueue_next_block_aggregated_attestation(); - rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_AGGREGATE, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.naive_aggregation_pool.read().num_items(), @@ -768,7 +775,8 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { rig.assert_event_journal_with_timeout( &[UNKNOWN_BLOCK_AGGREGATE, WORKER_FREED, NOTHING_TO_DO], Duration::from_secs(1) + QUEUED_ATTESTATION_DELAY, - ); + ) + .await; assert_eq!( rig.chain.op_pool.num_attestations(), @@ -778,10 +786,10 @@ fn requeue_unknown_block_gossip_aggregated_attestation_without_import() { } /// Ensure a bunch of valid operations can be imported. -#[test] -fn import_misc_gossip_ops() { +#[tokio::test] +async fn import_misc_gossip_ops() { // Exits need the long chain so validators aren't too young to exit. - let mut rig = TestRig::new(LONG_CHAIN); + let mut rig = TestRig::new(LONG_CHAIN).await; /* * Attester slashing @@ -791,7 +799,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_attester_slashing(); - rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_ATTESTER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_attester_slashings(), @@ -807,7 +816,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_proposer_slashing(); - rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_PROPOSER_SLASHING, WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_proposer_slashings(), @@ -823,7 +833,8 @@ fn import_misc_gossip_ops() { rig.enqueue_gossip_voluntary_exit(); - rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, WORKER_FREED, NOTHING_TO_DO]); + rig.assert_event_journal(&[GOSSIP_VOLUNTARY_EXIT, 
WORKER_FREED, NOTHING_TO_DO]) + .await; assert_eq!( rig.chain.op_pool.num_voluntary_exits(), @@ -831,3 +842,33 @@ fn import_misc_gossip_ops() { "op pool should have one more exit" ); } + +/// Ensure that rpc block going to the reprocessing queue flow +/// works when the duplicate cache handle is held by another task. +#[tokio::test] +async fn test_rpc_block_reprocessing() { + let mut rig = TestRig::new(SMALL_CHAIN).await; + let next_block_root = rig.next_block.canonical_root(); + // Insert the next block into the duplicate cache manually + let handle = rig.duplicate_cache.check_and_insert(next_block_root); + rig.enqueue_single_lookup_rpc_block(); + + rig.assert_event_journal(&[RPC_BLOCK, WORKER_FREED, NOTHING_TO_DO]) + .await; + // next_block shouldn't be processed since it couldn't get the + // duplicate cache handle + assert_ne!(next_block_root, rig.head_root()); + + drop(handle); + + // The block should arrive at the beacon processor again after + // the specified delay. + tokio::time::sleep(QUEUED_RPC_BLOCK_DELAY).await; + + rig.assert_event_journal(&[RPC_BLOCK]).await; + // Add an extra delay for block processing + tokio::time::sleep(Duration::from_millis(10)).await; + // head should update to next block now since the duplicate + // cache handle was dropped. + assert_eq!(next_block_root, rig.head_root()); +} diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 33c15cf06b..efe8d3bf12 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -12,6 +12,7 @@ //! block will be re-queued until their block is imported, or until they expire. 
use super::MAX_SCHEDULED_WORK_QUEUE_LEN; use crate::metrics; +use crate::sync::manager::BlockProcessType; use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use fnv::FnvHashMap; use futures::task::Poll; @@ -22,16 +23,18 @@ use slog::{crit, debug, error, warn, Logger}; use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::pin::Pin; +use std::sync::Arc; use std::task::Context; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SubnetId}; +use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; -const BLOCKS: &str = "blocks"; +const GOSSIP_BLOCKS: &str = "gossip_blocks"; +const RPC_BLOCKS: &str = "rpc_blocks"; const ATTESTATIONS: &str = "attestations"; /// Queue blocks for re-processing with an `ADDITIONAL_QUEUED_BLOCK_DELAY` after the slot starts. @@ -41,6 +44,9 @@ const ADDITIONAL_QUEUED_BLOCK_DELAY: Duration = Duration::from_millis(5); /// For how long to queue aggregated and unaggregated attestations for re-processing. pub const QUEUED_ATTESTATION_DELAY: Duration = Duration::from_secs(12); +/// For how long to queue rpc blocks before sending them back for reprocessing. +pub const QUEUED_RPC_BLOCK_DELAY: Duration = Duration::from_secs(3); + /// Set an arbitrary upper-bound on the number of queued blocks to avoid DoS attacks. The fact that /// we signature-verify blocks before putting them in the queue *should* protect against this, but /// it's nice to have extra protection. @@ -52,7 +58,10 @@ const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384; /// Messages that the scheduler can receive. 
pub enum ReprocessQueueMessage { /// A block that has been received early and we should queue for later processing. - EarlyBlock(QueuedBlock), + EarlyBlock(QueuedGossipBlock), + /// A gossip block for hash `X` is being imported, we should queue the rpc block for the same + /// hash until the gossip block is imported. + RpcBlock(QueuedRpcBlock), /// A block that was successfully processed. We use this to handle attestations for unknown /// blocks. BlockImported(Hash256), @@ -64,7 +73,8 @@ pub enum ReprocessQueueMessage { /// Events sent by the scheduler once they are ready for re-processing. pub enum ReadyWork { - Block(QueuedBlock), + Block(QueuedGossipBlock), + RpcBlock(QueuedRpcBlock), Unaggregate(QueuedUnaggregate), Aggregate(QueuedAggregate), } @@ -90,16 +100,30 @@ pub struct QueuedAggregate { } /// A block that arrived early and has been queued for later import. -pub struct QueuedBlock { +pub struct QueuedGossipBlock { pub peer_id: PeerId, pub block: Box>, pub seen_timestamp: Duration, } +/// A block that arrived for processing when the same block was being imported over gossip. +/// It is queued for later import. +pub struct QueuedRpcBlock { + pub block: Arc>, + pub process_type: BlockProcessType, + pub seen_timestamp: Duration, + /// Indicates if the beacon chain should process this block or not. + /// We use this to ignore block processing when rpc block queues are full. + pub should_process: bool, +} + /// Unifies the different messages processed by the block delay queue. enum InboundEvent { - /// A block that was queued for later processing and is ready for import. - ReadyBlock(QueuedBlock), + /// A gossip block that was queued for later processing and is ready for import. + ReadyGossipBlock(QueuedGossipBlock), + /// A rpc block that was queued because the same gossip block was being imported + /// will now be retried for import. + ReadyRpcBlock(QueuedRpcBlock), /// An aggregated or unaggregated attestation is ready for re-processing. 
ReadyAttestation(QueuedAttestationId), /// A `DelayQueue` returned an error. @@ -117,13 +141,15 @@ struct ReprocessQueue { /* Queues */ /// Queue to manage scheduled early blocks. - block_delay_queue: DelayQueue>, + gossip_block_delay_queue: DelayQueue>, + /// Queue to manage scheduled early blocks. + rpc_block_delay_queue: DelayQueue>, /// Queue to manage scheduled attestations. attestations_delay_queue: DelayQueue, /* Queued items */ /// Queued blocks. - queued_block_roots: HashSet, + queued_gossip_block_roots: HashSet, /// Queued aggregated attestations. queued_aggregates: FnvHashMap, DelayKey)>, /// Queued attestations. @@ -135,6 +161,7 @@ struct ReprocessQueue { /// Next attestation id, used for both aggregated and unaggregated attestations next_attestation: usize, early_block_debounce: TimeLatch, + rpc_block_debounce: TimeLatch, attestation_delay_debounce: TimeLatch, } @@ -167,12 +194,26 @@ impl Stream for ReprocessQueue { // // The sequential nature of blockchains means it is generally better to try and import all // existing blocks before new ones. - match self.block_delay_queue.poll_expired(cx) { + match self.gossip_block_delay_queue.poll_expired(cx) { Poll::Ready(Some(Ok(queued_block))) => { - return Poll::Ready(Some(InboundEvent::ReadyBlock(queued_block.into_inner()))); + return Poll::Ready(Some(InboundEvent::ReadyGossipBlock( + queued_block.into_inner(), + ))); } Poll::Ready(Some(Err(e))) => { - return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "block_queue"))); + return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "gossip_block_queue"))); + } + // `Poll::Ready(None)` means that there are no more entries in the delay queue and we + // will continue to get this result until something else is added into the queue. 
+ Poll::Ready(None) | Poll::Pending => (), + } + + match self.rpc_block_delay_queue.poll_expired(cx) { + Poll::Ready(Some(Ok(queued_block))) => { + return Poll::Ready(Some(InboundEvent::ReadyRpcBlock(queued_block.into_inner()))); + } + Poll::Ready(Some(Err(e))) => { + return Poll::Ready(Some(InboundEvent::DelayQueueError(e, "rpc_block_queue"))); } // `Poll::Ready(None)` means that there are no more entries in the delay queue and we // will continue to get this result until something else is added into the queue. @@ -219,14 +260,16 @@ pub fn spawn_reprocess_scheduler( let mut queue = ReprocessQueue { work_reprocessing_rx, ready_work_tx, - block_delay_queue: DelayQueue::new(), + gossip_block_delay_queue: DelayQueue::new(), + rpc_block_delay_queue: DelayQueue::new(), attestations_delay_queue: DelayQueue::new(), - queued_block_roots: HashSet::new(), + queued_gossip_block_roots: HashSet::new(), queued_aggregates: FnvHashMap::default(), queued_unaggregates: FnvHashMap::default(), awaiting_attestations_per_root: HashMap::new(), next_attestation: 0, early_block_debounce: TimeLatch::default(), + rpc_block_debounce: TimeLatch::default(), attestation_delay_debounce: TimeLatch::default(), }; @@ -259,13 +302,13 @@ impl ReprocessQueue { let block_root = early_block.block.block_root; // Don't add the same block to the queue twice. This prevents DoS attacks. - if self.queued_block_roots.contains(&block_root) { + if self.queued_gossip_block_roots.contains(&block_root) { return; } if let Some(duration_till_slot) = slot_clock.duration_to_slot(block_slot) { // Check to ensure this won't over-fill the queue. 
- if self.queued_block_roots.len() >= MAXIMUM_QUEUED_BLOCKS { + if self.queued_gossip_block_roots.len() >= MAXIMUM_QUEUED_BLOCKS { if self.early_block_debounce.elapsed() { warn!( log, @@ -278,10 +321,10 @@ impl ReprocessQueue { return; } - self.queued_block_roots.insert(block_root); + self.queued_gossip_block_roots.insert(block_root); // Queue the block until the start of the appropriate slot, plus // `ADDITIONAL_QUEUED_BLOCK_DELAY`. - self.block_delay_queue.insert( + self.gossip_block_delay_queue.insert( early_block, duration_till_slot + ADDITIONAL_QUEUED_BLOCK_DELAY, ); @@ -311,6 +354,58 @@ impl ReprocessQueue { } } } + // A rpc block arrived for processing at the same time when a gossip block + // for the same block hash is being imported. We wait for `QUEUED_RPC_BLOCK_DELAY` + // and then send the rpc block back for processing assuming the gossip import + // has completed by then. + InboundEvent::Msg(RpcBlock(mut rpc_block)) => { + // Check to ensure this won't over-fill the queue. + if self.rpc_block_delay_queue.len() >= MAXIMUM_QUEUED_BLOCKS { + if self.rpc_block_debounce.elapsed() { + warn!( + log, + "RPC blocks queue is full"; + "queue_size" => MAXIMUM_QUEUED_BLOCKS, + "msg" => "check system clock" + ); + } + // Return the block to the beacon processor signalling to + // ignore processing for this block + rpc_block.should_process = false; + if self + .ready_work_tx + .try_send(ReadyWork::RpcBlock(rpc_block)) + .is_err() + { + error!( + log, + "Failed to send rpc block to beacon processor"; + ); + } + return; + } + + // Queue the block for 1/4th of a slot + self.rpc_block_delay_queue + .insert(rpc_block, QUEUED_RPC_BLOCK_DELAY); + } + InboundEvent::ReadyRpcBlock(queued_rpc_block) => { + debug!( + log, + "Sending rpc block for reprocessing"; + "block_root" => %queued_rpc_block.block.canonical_root() + ); + if self + .ready_work_tx + .try_send(ReadyWork::RpcBlock(queued_rpc_block)) + .is_err() + { + error!( + log, + "Failed to send rpc block to beacon 
processor"; + ); + } + } InboundEvent::Msg(UnknownBlockAggregate(queued_aggregate)) => { if self.attestations_delay_queue.len() >= MAXIMUM_QUEUED_ATTESTATIONS { if self.attestation_delay_debounce.elapsed() { @@ -423,10 +518,10 @@ impl ReprocessQueue { } } // A block that was queued for later processing is now ready to be processed. - InboundEvent::ReadyBlock(ready_block) => { + InboundEvent::ReadyGossipBlock(ready_block) => { let block_root = ready_block.block.block_root; - if !self.queued_block_roots.remove(&block_root) { + if !self.queued_gossip_block_roots.remove(&block_root) { // Log an error to alert that we've made a bad assumption about how this // program works, but still process the block anyway. error!( @@ -499,8 +594,13 @@ impl ReprocessQueue { metrics::set_gauge_vec( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, - &[BLOCKS], - self.block_delay_queue.len() as i64, + &[GOSSIP_BLOCKS], + self.gossip_block_delay_queue.len() as i64, + ); + metrics::set_gauge_vec( + &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, + &[RPC_BLOCKS], + self.rpc_block_delay_queue.len() as i64, ); metrics::set_gauge_vec( &metrics::BEACON_PROCESSOR_REPROCESSING_QUEUE_TOTAL, diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index b367f7f6d2..93ed1b463b 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -6,13 +6,14 @@ use beacon_chain::{ observed_operations::ObservationOutcome, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::get_block_delay_ms, - BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, ForkChoiceError, + BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError, GossipVerifiedBlock, }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use 
slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use ssz::Encode; +use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; @@ -24,7 +25,7 @@ use types::{ use super::{ super::work_reprocessing_queue::{ - QueuedAggregate, QueuedBlock, QueuedUnaggregate, ReprocessQueueMessage, + QueuedAggregate, QueuedGossipBlock, QueuedUnaggregate, ReprocessQueueMessage, }, Worker, }; @@ -45,7 +46,7 @@ struct VerifiedUnaggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. -impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedUnaggregate { +impl VerifiedAttestation for VerifiedUnaggregate { fn attestation(&self) -> &Attestation { &self.attestation } @@ -53,6 +54,12 @@ impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedUnaggregate fn indexed_attestation(&self) -> &IndexedAttestation { &self.indexed_attestation } + + fn into_attestation_and_indices(self) -> (Attestation, Vec) { + let attestation = *self.attestation; + let attesting_indices = self.indexed_attestation.attesting_indices.into(); + (attestation, attesting_indices) + } } /// An attestation that failed validation by the `BeaconChain`. @@ -72,7 +79,7 @@ struct VerifiedAggregate { /// This implementation allows `Self` to be imported to fork choice and other functions on the /// `BeaconChain`. -impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregate { +impl VerifiedAttestation for VerifiedAggregate { fn attestation(&self) -> &Attestation { &self.signed_aggregate.message.aggregate } @@ -80,6 +87,13 @@ impl<'a, T: BeaconChainTypes> VerifiedAttestation for VerifiedAggregate { fn indexed_attestation(&self) -> &IndexedAttestation { &self.indexed_attestation } + + /// Efficient clone-free implementation that moves out of the `Box`. 
+ fn into_attestation_and_indices(self) -> (Attestation, Vec) { + let attestation = self.signed_aggregate.message.aggregate; + let attesting_indices = self.indexed_attestation.attesting_indices.into(); + (attestation, attesting_indices) + } } /// An attestation that failed validation by the `BeaconChain`. @@ -594,7 +608,7 @@ impl Worker { } } - if let Err(e) = self.chain.add_to_block_inclusion_pool(&verified_aggregate) { + if let Err(e) = self.chain.add_to_block_inclusion_pool(verified_aggregate) { debug!( self.log, "Attestation invalid for op pool"; @@ -636,24 +650,27 @@ impl Worker { /// /// Raises a log if there are errors. #[allow(clippy::too_many_arguments)] - pub fn process_gossip_block( + pub async fn process_gossip_block( self, message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: SignedBeaconBlock, + block: Arc>, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, seen_duration: Duration, ) { - if let Some(gossip_verified_block) = self.process_gossip_unverified_block( - message_id, - peer_id, - peer_client, - block, - reprocess_tx.clone(), - seen_duration, - ) { + if let Some(gossip_verified_block) = self + .process_gossip_unverified_block( + message_id, + peer_id, + peer_client, + block, + reprocess_tx.clone(), + seen_duration, + ) + .await + { let block_root = gossip_verified_block.block_root; if let Some(handle) = duplicate_cache.check_and_insert(block_root) { self.process_gossip_verified_block( @@ -661,7 +678,8 @@ impl Worker { gossip_verified_block, reprocess_tx, seen_duration, - ); + ) + .await; // Drop the handle to remove the entry from the cache drop(handle); } else { @@ -678,12 +696,12 @@ impl Worker { /// if it passes gossip propagation criteria, tell the network thread to forward it. /// /// Returns the `GossipVerifiedBlock` if verification passes and raises a log if there are errors. 
- pub fn process_gossip_unverified_block( + pub async fn process_gossip_unverified_block( &self, message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: SignedBeaconBlock, + block: Arc>, reprocess_tx: mpsc::Sender>, seen_duration: Duration, ) -> Option> { @@ -704,7 +722,7 @@ impl Worker { Some(peer_client.to_string()), ); - let verified_block = match self.chain.verify_block_for_gossip(block) { + let verified_block = match self.chain.clone().verify_block_for_gossip(block).await { Ok(verified_block) => { if block_delay >= self.chain.slot_clock.unagg_attestation_production_delay() { metrics::inc_counter(&metrics::BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL); @@ -766,14 +784,15 @@ impl Worker { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); // Prevent recurring behaviour by penalizing the peer slightly. - self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError, "gossip_block_high"); + self.gossip_penalize_peer( + peer_id, + PeerAction::HighToleranceError, + "gossip_block_high", + ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } - // TODO(merge): reconsider peer scoring for this event. - Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::RequestFailed(_))) - | Err(e @ BlockError::ExecutionPayloadError(ExecutionPayloadError::UnverifiedNonOptimisticCandidate)) - | Err(e @BlockError::ExecutionPayloadError(ExecutionPayloadError::NoExecutionConnection)) => { + Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!(self.log, "Could not verify block for gossip, ignoring the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -792,7 +811,6 @@ impl Worker { | Err(e @ BlockError::TooManySkippedSlots { .. }) | Err(e @ BlockError::WeakSubjectivityConflict) | Err(e @ BlockError::InconsistentFork(_)) - // TODO(merge): reconsider peer scoring for this event. 
| Err(e @ BlockError::ExecutionPayloadError(_)) // TODO(merge): reconsider peer scoring for this event. | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. }) @@ -800,7 +818,11 @@ impl Worker { warn!(self.log, "Could not verify block for gossip, rejecting the block"; "error" => %e); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); - self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError, "gossip_block_low"); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "gossip_block_low", + ); return None; } }; @@ -852,7 +874,7 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_REQUEUED_TOTAL); if reprocess_tx - .try_send(ReprocessQueueMessage::EarlyBlock(QueuedBlock { + .try_send(ReprocessQueueMessage::EarlyBlock(QueuedGossipBlock { peer_id, block: Box::new(verified_block), seen_timestamp: seen_duration, @@ -887,7 +909,7 @@ impl Worker { /// Process the beacon block that has already passed gossip verification. /// /// Raises a log if there are errors. - pub fn process_gossip_verified_block( + pub async fn process_gossip_verified_block( self, peer_id: PeerId, verified_block: GossipVerifiedBlock, @@ -895,9 +917,13 @@ impl Worker { // This value is not used presently, but it might come in handy for debugging. 
_seen_duration: Duration, ) { - let block = Box::new(verified_block.block.clone()); + let block: Arc<_> = verified_block.block.clone(); - match self.chain.process_block(verified_block) { + match self + .chain + .process_block(verified_block, CountUnrealized::True) + .await + { Ok(block_root) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); @@ -913,25 +939,14 @@ impl Worker { ) }; - trace!( + debug!( self.log, "Gossipsub block processed"; + "block" => ?block_root, "peer_id" => %peer_id ); - match self.chain.fork_choice() { - Ok(()) => trace!( - self.log, - "Fork choice success"; - "location" => "block gossip" - ), - Err(e) => error!( - self.log, - "Fork choice failed"; - "error" => ?e, - "location" => "block gossip" - ), - } + self.chain.recompute_head_at_current_slot().await; } Err(BlockError::ParentUnknown { .. }) => { // Inform the sync manager to find parents for this block @@ -943,6 +958,13 @@ impl Worker { ); self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block)); } + Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { + debug!( + self.log, + "Failed to verify execution payload"; + "error" => %e + ); + } other => { debug!( self.log, @@ -1134,13 +1156,9 @@ impl Worker { .read() .register_gossip_attester_slashing(slashing.as_inner()); - if let Err(e) = self.chain.import_attester_slashing(slashing) { - debug!(self.log, "Error importing attester slashing"; "error" => ?e); - metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_ERROR_TOTAL); - } else { - debug!(self.log, "Successfully imported attester slashing"); - metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); - } + self.chain.import_attester_slashing(slashing); + debug!(self.log, "Successfully imported attester slashing"); + metrics::inc_counter(&metrics::BEACON_PROCESSOR_ATTESTER_SLASHING_IMPORTED_TOTAL); } /// Process the sync committee signature received from the gossip network and: 
diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs index 2d2196b9e9..8ca9c35e47 100644 --- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs @@ -7,8 +7,9 @@ use itertools::process_results; use lighthouse_network::rpc::StatusMessage; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; -use slog::{debug, error, warn}; +use slog::{debug, error}; use slot_clock::SlotClock; +use std::sync::Arc; use task_executor::TaskExecutor; use types::{Epoch, EthSpec, Hash256, Slot}; @@ -62,7 +63,7 @@ impl Worker { &self, remote: &StatusMessage, ) -> Result, BeaconChainError> { - let local = self.chain.status_message()?; + let local = self.chain.status_message(); let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); let irrelevant_reason = if local.fork_digest != remote.fork_digest { @@ -134,6 +135,7 @@ impl Worker { executor.spawn( async move { let mut send_block_count = 0; + let mut send_response = true; for root in request.block_roots.iter() { match self .chain @@ -143,7 +145,7 @@ impl Worker { Ok(Some(block)) => { self.send_response( peer_id, - Response::BlocksByRoot(Some(Box::new(block))), + Response::BlocksByRoot(Some(block)), request_id, ); send_block_count += 1; @@ -156,6 +158,23 @@ impl Worker { "request_root" => ?root ); } + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { + debug!( + self.log, + "Failed to fetch execution payload for blocks by root request"; + "block_root" => ?root, + "reason" => "execution layer not synced", + ); + // send the stream terminator + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Execution layer not synced".into(), + request_id, + ); + send_response = false; + break; + } Err(e) => { debug!( self.log, @@ -172,11 +191,13 @@ impl 
Worker { "Received BlocksByRoot Request"; "peer" => %peer_id, "requested" => request.block_roots.len(), - "returned" => send_block_count + "returned" => %send_block_count ); // send stream termination - self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + if send_response { + self.send_response(peer_id, Response::BlocksByRoot(None), request_id); + } drop(send_on_drop); }, "load_blocks_by_root_blocks", @@ -196,16 +217,12 @@ impl Worker { "peer_id" => %peer_id, "count" => req.count, "start_slot" => req.start_slot, - "step" => req.step); + ); // Should not send more than max request blocks if req.count > MAX_REQUEST_BLOCKS { req.count = MAX_REQUEST_BLOCKS; } - if req.step == 0 { - self.goodbye_peer(peer_id, GoodbyeReason::Fault); - return warn!(self.log, "Peer sent invalid range request"; "error" => "Step sent was 0"); - } let forwards_block_root_iter = match self .chain @@ -229,29 +246,21 @@ impl Worker { Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e), }; - // Pick out the required blocks, ignoring skip-slots and stepping by the step parameter. - // - // NOTE: We don't mind if req.count * req.step overflows as it just ends the iterator early and - // the peer will get less blocks. - // The step parameter is quadratically weighted in the filter, so large values should be - // prevented before reaching this point. + // Pick out the required blocks, ignoring skip-slots. 
let mut last_block_root = None; let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| { - slot.as_u64() < req.start_slot.saturating_add(req.count * req.step) - }) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .step_by(req.step as usize) - .collect::>>() + iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) + // map skip slots to None + .map(|(root, _)| { + let result = if Some(root) == last_block_root { + None + } else { + Some(root) + }; + last_block_root = Some(root); + result + }) + .collect::>>() }); let block_roots = match maybe_block_roots { @@ -266,6 +275,7 @@ impl Worker { executor.spawn( async move { let mut blocks_sent = 0; + let mut send_response = true; for root in block_roots { match self.chain.get_block(&root).await { @@ -273,12 +283,12 @@ impl Worker { // Due to skip slots, blocks could be out of the range, we ensure they // are in the range before sending if block.slot() >= req.start_slot - && block.slot() < req.start_slot + req.count * req.step + && block.slot() < req.start_slot + req.count { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - response: Response::BlocksByRange(Some(Box::new(block))), + response: Response::BlocksByRange(Some(Arc::new(block))), id: request_id, }); } @@ -291,6 +301,23 @@ impl Worker { ); break; } + Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { + debug!( + self.log, + "Failed to fetch execution payload for blocks by range request"; + "block_root" => ?root, + "reason" => "execution layer not synced", + ); + // send the stream terminator + self.send_error_response( + peer_id, + RPCResponseErrorCode::ResourceUnavailable, + "Execution layer not synced".into(), + request_id, + ); + send_response = false; + break; + } Err(e) => { error!( self.log, @@ 
-331,12 +358,15 @@ impl Worker { ); } - // send the stream terminator - self.send_network_message(NetworkMessage::SendResponse { - peer_id, - response: Response::BlocksByRange(None), - id: request_id, - }); + if send_response { + // send the stream terminator + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: Response::BlocksByRange(None), + id: request_id, + }); + } + drop(send_on_drop); }, "load_blocks_by_range_blocks", diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 943ee9cdaf..760896e0e9 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -1,16 +1,19 @@ use std::time::Duration; use super::{super::work_reprocessing_queue::ReprocessQueueMessage, Worker}; +use crate::beacon_processor::work_reprocessing_queue::QueuedRpcBlock; use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE; use crate::beacon_processor::DuplicateCache; use crate::metrics; use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; +use beacon_chain::CountUnrealized; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, }; use lighthouse_network::PeerAction; -use slog::{debug, error, info, trace, warn}; +use slog::{debug, error, info, warn}; +use std::sync::Arc; use tokio::sync::mpsc; use types::{Epoch, Hash256, SignedBeaconBlock}; @@ -18,7 +21,7 @@ use types::{Epoch, Hash256, SignedBeaconBlock}; #[derive(Clone, Debug, PartialEq)] pub enum ChainSegmentProcessId { /// Processing Id of a range syncing batch. - RangeBatchId(ChainId, Epoch), + RangeBatchId(ChainId, Epoch, CountUnrealized), /// Processing ID for a backfill syncing batch. BackSyncBatchId(Epoch), /// Processing Id of the parent lookup of a block. 
@@ -35,28 +38,49 @@ struct ChainSegmentFailed { impl Worker { /// Attempt to process a block received from a direct RPC request. - pub fn process_rpc_block( + pub async fn process_rpc_block( self, - block: SignedBeaconBlock, + block: Arc>, seen_timestamp: Duration, process_type: BlockProcessType, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, + should_process: bool, ) { + if !should_process { + // Sync handles these results + self.send_sync_message(SyncMessage::BlockProcessed { + process_type, + result: crate::sync::manager::BlockProcessResult::Ignored, + }); + return; + } // Check if the block is already being imported through another source let handle = match duplicate_cache.check_and_insert(block.canonical_root()) { Some(handle) => handle, None => { - // Sync handles these results - self.send_sync_message(SyncMessage::BlockProcessed { + debug!( + self.log, + "Gossip block is being processed"; + "action" => "sending rpc block to reprocessing queue", + "block_root" => %block.canonical_root(), + ); + // Send message to work reprocess queue to retry the block + let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock { + block: block.clone(), process_type, - result: Err(BlockError::BlockIsAlreadyKnown), + seen_timestamp, + should_process: true, }); + + if reprocess_tx.try_send(reprocess_msg).is_err() { + error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block.canonical_root()) + }; return; } }; let slot = block.slot(); - let result = self.chain.process_block(block); + let result = self.chain.process_block(block, CountUnrealized::True).await; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); @@ -77,13 +101,14 @@ impl Worker { None, None, ); - self.run_fork_choice() + + self.chain.recompute_head_at_current_slot().await; } } // Sync handles these results self.send_sync_message(SyncMessage::BlockProcessed { process_type, - result: result.map(|_| ()), + result: result.into(), }); // 
Drop the handle to remove the entry from the cache @@ -92,19 +117,22 @@ impl Worker { /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. - pub fn process_chain_segment( + pub async fn process_chain_segment( &self, sync_type: ChainSegmentProcessId, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>>, ) { let result = match sync_type { // this a request from the range sync - ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch, count_unrealized) => { let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let sent_blocks = downloaded_blocks.len(); - match self.process_blocks(downloaded_blocks.iter()) { + match self + .process_blocks(downloaded_blocks.iter(), count_unrealized) + .await + { (_, Ok(_)) => { debug!(self.log, "Batch processed"; "batch_epoch" => epoch, @@ -113,7 +141,9 @@ impl Worker { "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, "service"=> "sync"); - BatchProcessResult::Success(sent_blocks > 0) + BatchProcessResult::Success { + was_non_empty: sent_blocks > 0, + } } (imported_blocks, Err(e)) => { debug!(self.log, "Batch processing failed"; @@ -124,10 +154,12 @@ impl Worker { "imported_blocks" => imported_blocks, "error" => %e.message, "service" => "sync"); - - BatchProcessResult::Failed { - imported_blocks: imported_blocks > 0, - peer_action: e.peer_action, + match e.peer_action { + Some(penalty) => BatchProcessResult::FaultyFailure { + imported_blocks: imported_blocks > 0, + penalty, + }, + None => BatchProcessResult::NonFaultyFailure, } } } @@ -146,7 +178,9 @@ impl Worker { "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, "service"=> "sync"); - BatchProcessResult::Success(sent_blocks > 0) + BatchProcessResult::Success { + was_non_empty: sent_blocks > 0, + } } (_, Err(e)) => { 
debug!(self.log, "Backfill batch processing failed"; @@ -155,9 +189,12 @@ impl Worker { "last_block_slot" => end_slot, "error" => %e.message, "service" => "sync"); - BatchProcessResult::Failed { - imported_blocks: false, - peer_action: e.peer_action, + match e.peer_action { + Some(penalty) => BatchProcessResult::FaultyFailure { + imported_blocks: false, + penalty, + }, + None => BatchProcessResult::NonFaultyFailure, } } } @@ -171,17 +208,25 @@ impl Worker { ); // parent blocks are ordered from highest slot to lowest, so we need to process in // reverse - match self.process_blocks(downloaded_blocks.iter().rev()) { + match self + .process_blocks(downloaded_blocks.iter().rev(), CountUnrealized::True) + .await + { (imported_blocks, Err(e)) => { debug!(self.log, "Parent lookup failed"; "error" => %e.message); - BatchProcessResult::Failed { - imported_blocks: imported_blocks > 0, - peer_action: e.peer_action, + match e.peer_action { + Some(penalty) => BatchProcessResult::FaultyFailure { + imported_blocks: imported_blocks > 0, + penalty, + }, + None => BatchProcessResult::NonFaultyFailure, } } (imported_blocks, Ok(_)) => { debug!(self.log, "Parent lookup processed successfully"); - BatchProcessResult::Success(imported_blocks > 0) + BatchProcessResult::Success { + was_non_empty: imported_blocks > 0, + } } } } @@ -191,19 +236,22 @@ impl Worker { } /// Helper function to process blocks batches which only consumes the chain and blocks to process. 
- fn process_blocks<'a>( + async fn process_blocks<'a>( &self, - downloaded_blocks: impl Iterator>, + downloaded_blocks: impl Iterator>>, + count_unrealized: CountUnrealized, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blocks = downloaded_blocks.cloned().collect::>(); - match self.chain.process_chain_segment(blocks) { + let blocks: Vec> = downloaded_blocks.cloned().collect(); + match self + .chain + .process_chain_segment(blocks, count_unrealized) + .await + { ChainSegmentResult::Successful { imported_blocks } => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); if imported_blocks > 0 { - // Batch completed successfully with at least one block, run fork choice. - self.run_fork_choice(); + self.chain.recompute_head_at_current_slot().await; } - (imported_blocks, Ok(())) } ChainSegmentResult::Failed { @@ -213,7 +261,7 @@ impl Worker { metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_FAILED_TOTAL); let r = self.handle_failed_chain_segment(error); if imported_blocks > 0 { - self.run_fork_choice(); + self.chain.recompute_head_at_current_slot().await; } (imported_blocks, r) } @@ -223,9 +271,13 @@ impl Worker { /// Helper function to process backfill block batches which only consumes the chain and blocks to process. fn process_backfill_blocks( &self, - blocks: Vec>, + blocks: Vec>>, ) -> (usize, Result<(), ChainSegmentFailed>) { - let blinded_blocks = blocks.into_iter().map(Into::into).collect(); + let blinded_blocks = blocks + .iter() + .map(|full_block| full_block.clone_as_blinded()) + .map(Arc::new) + .collect(); match self.chain.import_historical_block_batch(blinded_blocks) { Ok(imported_blocks) => { metrics::inc_counter( @@ -335,24 +387,6 @@ impl Worker { } } - /// Runs fork-choice on a given chain. This is used during block processing after one successful - /// block import. 
- fn run_fork_choice(&self) { - match self.chain.fork_choice() { - Ok(()) => trace!( - self.log, - "Fork choice success"; - "location" => "batch processing" - ), - Err(e) => error!( - self.log, - "Fork choice failed"; - "error" => ?e, - "location" => "batch import error" - ), - } - } - /// Helper function to handle a `BlockError` from `process_chain_segment` fn handle_failed_chain_segment( &self, @@ -425,6 +459,34 @@ impl Worker { peer_action: None, }) } + ref err @ BlockError::ExecutionPayloadError(ref epe) => { + if !epe.penalize_peer() { + // These errors indicate an issue with the EL and not the `ChainSegment`. + // Pause the syncing while the EL recovers + debug!(self.log, + "Execution layer verification failed"; + "outcome" => "pausing sync", + "err" => ?err + ); + Err(ChainSegmentFailed { + message: format!("Execution layer offline. Reason: {:?}", err), + // Do not penalize peers for internal errors. + peer_action: None, + }) + } else { + debug!(self.log, + "Invalid execution payload"; + "error" => ?err + ); + Err(ChainSegmentFailed { + message: format!( + "Peer sent a block containing invalid execution payload. Reason: {:?}", + err + ), + peer_action: Some(PeerAction::LowToleranceError), + }) + } + } other => { debug!( self.log, "Invalid block received"; diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 283d8dfb9e..648c636acc 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -18,4 +18,6 @@ mod subnet_service; mod sync; pub use lighthouse_network::NetworkConfig; -pub use service::{NetworkMessage, NetworkService}; +pub use service::{ + NetworkMessage, NetworkReceivers, NetworkSenders, NetworkService, ValidatorSubscriptionMessage, +}; diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 02c491cb01..b4e7a3bace 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -143,10 +143,6 @@ lazy_static! 
{ "beacon_processor_attester_slashing_imported_total", "Total number of attester slashings imported to the op pool." ); - pub static ref BEACON_PROCESSOR_ATTESTER_SLASHING_ERROR_TOTAL: Result = try_create_int_counter( - "beacon_processor_attester_slashing_error_total", - "Total number of attester slashings that raised an error during processing." - ); // Rpc blocks. pub static ref BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_rpc_block_queue_total", @@ -161,6 +157,10 @@ lazy_static! { "beacon_processor_chain_segment_queue_total", "Count of chain segments from the rpc waiting to be verified." ); + pub static ref BEACON_PROCESSOR_BACKFILL_CHAIN_SEGMENT_QUEUE_TOTAL: Result = try_create_int_gauge( + "beacon_processor_backfill_chain_segment_queue_total", + "Count of backfill chain segments from the rpc waiting to be verified." + ); pub static ref BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL: Result = try_create_int_counter( "beacon_processor_chain_segment_success_total", "Total number of chain segments successfully processed." @@ -252,6 +252,20 @@ lazy_static! { "Gossipsub sync_committee errors per error type", &["type"] ); + + /* + * Network queue metrics + */ + pub static ref NETWORK_RECEIVE_EVENTS: Result = try_create_int_counter_vec( + "network_receive_events", + "Count of events received by the channel to the network service", + &["type"] + ); + pub static ref NETWORK_RECEIVE_TIMES: Result = try_create_histogram_vec( + "network_receive_times", + "Time taken for network to handle an event sent to the network service.", + &["type"] + ); } lazy_static! { @@ -297,13 +311,18 @@ lazy_static! 
{ /* * Block Delay Metrics */ - pub static ref BEACON_BLOCK_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result = try_create_histogram( + pub static ref BEACON_BLOCK_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result = try_create_histogram_with_buckets( "beacon_block_gossip_propagation_verification_delay_time", "Duration between when the block is received and when it is verified for propagation.", + // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5] + decimal_buckets(-3,-1) ); - pub static ref BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME: Result = try_create_histogram( + pub static ref BEACON_BLOCK_GOSSIP_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( "beacon_block_gossip_slot_start_delay_time", "Duration between when the block is received and the start of the slot it belongs to.", + // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + decimal_buckets(-1,2) + ); pub static ref BEACON_BLOCK_GOSSIP_ARRIVED_LATE_TOTAL: Result = try_create_int_counter( "beacon_block_gossip_arrived_late_total", diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index b8db9c17f8..ce11cbdcef 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -2,9 +2,10 @@ use crate::beacon_processor::{ BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, }; use crate::service::{NetworkMessage, RequestId}; +use crate::status::status_message; use crate::sync::manager::RequestId as SyncId; use crate::sync::SyncMessage; -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::*; use lighthouse_network::{ Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response, @@ -114,11 +115,10 @@ impl Processor { /// Called when we first connect to a peer, or when the PeerManager determines we need to /// re-status. 
pub fn send_status(&mut self, peer_id: PeerId) { - if let Ok(status_message) = status_message(&self.chain) { - debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); - self.network - .send_processor_request(peer_id, Request::Status(status_message)); - } + let status_message = status_message(&self.chain); + debug!(self.log, "Sending Status Request"; "peer" => %peer_id, &status_message); + self.network + .send_processor_request(peer_id, Request::Status(status_message)); } /// Handle a `Status` request. @@ -132,12 +132,12 @@ impl Processor { ) { debug!(self.log, "Received Status Request"; "peer_id" => %peer_id, &status); - // ignore status responses if we are shutting down - if let Ok(status_message) = status_message(&self.chain) { - // Say status back. - self.network - .send_response(peer_id, Response::Status(status_message), request_id); - } + // Say status back. + self.network.send_response( + peer_id, + Response::Status(status_message(&self.chain)), + request_id, + ); self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status)) } @@ -178,7 +178,7 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>>, + beacon_block: Option>>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { @@ -209,7 +209,7 @@ impl Processor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>>, + beacon_block: Option>>, ) { let request_id = match request_id { RequestId::Sync(sync_id) => match sync_id { @@ -244,7 +244,7 @@ impl Processor { message_id: MessageId, peer_id: PeerId, peer_client: Client, - block: Box>, + block: Arc>, ) { self.send_beacon_processor_work(BeaconWorkEvent::gossip_beacon_block( message_id, @@ -370,22 +370,6 @@ impl Processor { } } -/// Build a `StatusMessage` representing the state of the given `beacon_chain`. 
-pub(crate) fn status_message( - beacon_chain: &BeaconChain, -) -> Result { - let head_info = beacon_chain.head_info()?; - let fork_digest = beacon_chain.enr_fork_id().fork_digest; - - Ok(StatusMessage { - fork_digest, - finalized_root: head_info.finalized_checkpoint.root, - finalized_epoch: head_info.finalized_checkpoint.epoch, - head_root: head_info.block_root, - head_slot: head_info.slot, - }) -} - /// Wraps a Network Channel to employ various RPC related network functionality for the /// processor. #[derive(Clone)] @@ -425,22 +409,6 @@ impl HandlerNetworkContext { response, }) } - - /// Sends an error response to the network task. - pub fn _send_error_response( - &mut self, - peer_id: PeerId, - id: PeerRequestId, - error: RPCResponseErrorCode, - reason: String, - ) { - self.inform_network(NetworkMessage::SendErrorResponse { - peer_id, - error, - id, - reason, - }) - } } fn timestamp_now() -> Duration { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a8995de2e5..f5e32dcff0 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -7,7 +7,7 @@ use crate::{ subnet_service::{AttestationService, SubnetServiceMessage}, NetworkConfig, }; -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; @@ -26,12 +26,13 @@ use lighthouse_network::{ use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; +use strum::IntoStaticStr; use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use types::{ - ChainSpec, EthSpec, ForkContext, RelativeEpoch, Slot, SubnetId, SyncCommitteeSubscription, - SyncSubnetId, Unsigned, ValidatorSubscription, + ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, + 
Unsigned, ValidatorSubscription, }; mod tests; @@ -42,6 +43,9 @@ const METRIC_UPDATE_INTERVAL: u64 = 5; const SUBSCRIBE_DELAY_SLOTS: u64 = 2; /// Delay after a fork where we unsubscribe from pre-fork topics. const UNSUBSCRIBE_DELAY_EPOCHS: u64 = 2; +/// Size of the queue for validator subnet subscriptions. The number is chosen so that we may be +/// able to run tens of thousands of validators on one BN. +const VALIDATOR_SUBSCRIPTION_MESSAGE_QUEUE_SIZE: usize = 65_536; /// Application level requests sent to the network. #[derive(Debug, Clone, Copy)] @@ -51,15 +55,9 @@ pub enum RequestId { } /// Types of messages that the network service can receive. -#[derive(Debug)] +#[derive(Debug, IntoStaticStr)] +#[strum(serialize_all = "snake_case")] pub enum NetworkMessage { - /// Subscribes a list of validators to specific slots for attestation duties. - AttestationSubscribe { - subscriptions: Vec, - }, - SyncCommitteeSubscribe { - subscriptions: Vec, - }, /// Subscribes the beacon node to the core gossipsub topics. We do this when we are either /// synced or close to the head slot. SubscribeCoreTopics, @@ -115,6 +113,59 @@ pub enum NetworkMessage { }, } +/// Messages triggered by validators that may trigger a subscription to a subnet. +/// +/// These messages can be very numerous with large validator counts (hundreds of thousands per +/// minute). Therefore we separate them from the separated from the `NetworkMessage` to provide +/// fairness regarding message processing. +#[derive(Debug, IntoStaticStr)] +#[strum(serialize_all = "snake_case")] +pub enum ValidatorSubscriptionMessage { + /// Subscribes a list of validators to specific slots for attestation duties. 
+ AttestationSubscribe { + subscriptions: Vec, + }, + SyncCommitteeSubscribe { + subscriptions: Vec, + }, +} + +#[derive(Clone)] +pub struct NetworkSenders { + network_send: mpsc::UnboundedSender>, + validator_subscription_send: mpsc::Sender, +} + +pub struct NetworkReceivers { + pub network_recv: mpsc::UnboundedReceiver>, + pub validator_subscription_recv: mpsc::Receiver, +} + +impl NetworkSenders { + pub fn new() -> (Self, NetworkReceivers) { + let (network_send, network_recv) = mpsc::unbounded_channel::>(); + let (validator_subscription_send, validator_subscription_recv) = + mpsc::channel(VALIDATOR_SUBSCRIPTION_MESSAGE_QUEUE_SIZE); + let senders = Self { + network_send, + validator_subscription_send, + }; + let receivers = NetworkReceivers { + network_recv, + validator_subscription_recv, + }; + (senders, receivers) + } + + pub fn network_send(&self) -> mpsc::UnboundedSender> { + self.network_send.clone() + } + + pub fn validator_subscription_send(&self) -> mpsc::Sender { + self.validator_subscription_send.clone() + } +} + /// Service that handles communication between internal services and the `lighthouse_network` network service. pub struct NetworkService { /// A reference to the underlying beacon chain. @@ -127,6 +178,8 @@ pub struct NetworkService { sync_committee_service: SyncCommitteeService, /// The receiver channel for lighthouse to communicate with the network service. network_recv: mpsc::UnboundedReceiver>, + /// The receiver channel for lighthouse to send validator subscription requests. + validator_subscription_recv: mpsc::Receiver, /// The sending channel for the network service to send messages to be routed throughout /// lighthouse. 
router_send: mpsc::UnboundedSender>, @@ -168,18 +221,15 @@ impl NetworkService { config: &NetworkConfig, executor: task_executor::TaskExecutor, gossipsub_registry: Option<&'_ mut Registry>, - ) -> error::Result<( - Arc>, - mpsc::UnboundedSender>, - )> { + ) -> error::Result<(Arc>, NetworkSenders)> { let network_log = executor.log().clone(); - // build the network channel - let (network_send, network_recv) = mpsc::unbounded_channel::>(); + // build the channels for external comms + let (network_senders, network_recievers) = NetworkSenders::new(); // try and construct UPnP port mappings if required. let upnp_config = crate::nat::UPnPConfig::from(config); let upnp_log = network_log.new(o!("service" => "UPnP")); - let upnp_network_send = network_send.clone(); + let upnp_network_send = network_senders.network_send(); if config.upnp_enabled { executor.spawn_blocking( move || { @@ -244,7 +294,7 @@ impl NetworkService { let router_send = Router::spawn( beacon_chain.clone(), network_globals.clone(), - network_send.clone(), + network_senders.network_send(), executor.clone(), network_log.clone(), )?; @@ -263,6 +313,11 @@ impl NetworkService { // create a timer for updating gossipsub parameters let gossipsub_parameter_update = tokio::time::interval(Duration::from_secs(60)); + let NetworkReceivers { + network_recv, + validator_subscription_recv, + } = network_recievers; + // create the network service and spawn the task let network_log = network_log.new(o!("service" => "network")); let network_service = NetworkService { @@ -271,6 +326,7 @@ impl NetworkService { attestation_service, sync_committee_service, network_recv, + validator_subscription_recv, router_send, store, network_globals: network_globals.clone(), @@ -290,7 +346,7 @@ impl NetworkService { network_service.spawn_service(executor); - Ok((network_globals, network_send)) + Ok((network_globals, network_senders)) } /// Returns the required fork digests that gossipsub needs to subscribe to based on the current slot. 
@@ -358,11 +414,14 @@ impl NetworkService { // handle a message sent to the network Some(msg) = self.network_recv.recv() => self.on_network_msg(msg, &mut shutdown_sender).await, + // handle a message from a validator requesting a subscription to a subnet + Some(msg) = self.validator_subscription_recv.recv() => self.on_validator_subscription_msg(msg).await, + // process any attestation service events Some(msg) = self.attestation_service.next() => self.on_attestation_service_msg(msg), // process any sync committee service events - Some(msg) = self.sync_committee_service.next() => self.on_sync_commitee_service_message(msg), + Some(msg) = self.sync_committee_service.next() => self.on_sync_committee_service_message(msg), event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, @@ -505,6 +564,9 @@ impl NetworkService { msg: NetworkMessage, shutdown_sender: &mut Sender, ) { + metrics::inc_counter_vec(&metrics::NETWORK_RECEIVE_EVENTS, &[(&msg).into()]); + let _timer = metrics::start_timer_vec(&metrics::NETWORK_RECEIVE_TIMES, &[(&msg).into()]); + match msg { NetworkMessage::SendRequest { peer_id, @@ -606,22 +668,6 @@ impl NetworkService { reason, source, } => self.libp2p.goodbye_peer(&peer_id, reason, source), - NetworkMessage::AttestationSubscribe { subscriptions } => { - if let Err(e) = self - .attestation_service - .validator_subscriptions(subscriptions) - { - warn!(self.log, "Attestation validator subscription failed"; "error" => e); - } - } - NetworkMessage::SyncCommitteeSubscribe { subscriptions } => { - if let Err(e) = self - .sync_committee_service - .validator_subscriptions(subscriptions) - { - warn!(self.log, "Sync committee calidator subscription failed"; "error" => e); - } - } NetworkMessage::SubscribeCoreTopics => { if self.shutdown_after_sync { if let Err(e) = shutdown_sender @@ -704,31 +750,36 @@ impl NetworkService { } } + /// Handle a message sent to the network service. 
+ async fn on_validator_subscription_msg(&mut self, msg: ValidatorSubscriptionMessage) { + match msg { + ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions } => { + if let Err(e) = self + .attestation_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, "Attestation validator subscription failed"; "error" => e); + } + } + ValidatorSubscriptionMessage::SyncCommitteeSubscribe { subscriptions } => { + if let Err(e) = self + .sync_committee_service + .validator_subscriptions(subscriptions) + { + warn!(self.log, "Sync committee calidator subscription failed"; "error" => e); + } + } + } + } + fn update_gossipsub_parameters(&mut self) { if let Ok(slot) = self.beacon_chain.slot() { - if let Some(active_validators) = self + let active_validators_opt = self .beacon_chain - .with_head(|head| { - Ok::<_, BeaconChainError>( - head.beacon_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .map(|indices| indices.len()) - .ok() - .or_else(|| { - // if active validator cached was not build we count the - // active validators - self.beacon_chain.epoch().ok().map(|current_epoch| { - head.beacon_state - .validators() - .iter() - .filter(|validator| validator.is_active_at(current_epoch)) - .count() - }) - }), - ) - }) - .unwrap_or(None) - { + .canonical_head + .cached_head() + .active_validator_count(); + if let Some(active_validators) = active_validators_opt { if self .libp2p .swarm @@ -742,6 +793,14 @@ impl NetworkService { "active_validators" => active_validators ); } + } else { + // This scenario will only happen if the caches on the cached canonical head aren't + // built. That should never be the case. 
+ error!( + self.log, + "Active validator count unavailable"; + "info" => "please report this bug" + ); } } } @@ -783,7 +842,7 @@ impl NetworkService { } } - fn on_sync_commitee_service_message(&mut self, msg: SubnetServiceMessage) { + fn on_sync_committee_service_message(&mut self, msg: SubnetServiceMessage) { match msg { SubnetServiceMessage::Subscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index ade490e00e..865f8ee933 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,4 +1,5 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use types::{EthSpec, Hash256}; use lighthouse_network::rpc::StatusMessage; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. @@ -6,20 +7,33 @@ use lighthouse_network::rpc::StatusMessage; /// NOTE: The purpose of this is simply to obtain a `StatusMessage` from the `BeaconChain` without /// polluting/coupling the type with RPC concepts. pub trait ToStatusMessage { - fn status_message(&self) -> Result; + fn status_message(&self) -> StatusMessage; } impl ToStatusMessage for BeaconChain { - fn status_message(&self) -> Result { - let head_info = self.head_info()?; - let fork_digest = self.enr_fork_id().fork_digest; - - Ok(StatusMessage { - fork_digest, - finalized_root: head_info.finalized_checkpoint.root, - finalized_epoch: head_info.finalized_checkpoint.epoch, - head_root: head_info.block_root, - head_slot: head_info.slot, - }) + fn status_message(&self) -> StatusMessage { + status_message(self) + } +} + +/// Build a `StatusMessage` representing the state of the given `beacon_chain`. 
+pub(crate) fn status_message(beacon_chain: &BeaconChain) -> StatusMessage { + let fork_digest = beacon_chain.enr_fork_id().fork_digest; + let cached_head = beacon_chain.canonical_head.cached_head(); + let mut finalized_checkpoint = cached_head.finalized_checkpoint(); + + // Alias the genesis checkpoint root to `0x00`. + let spec = &beacon_chain.spec; + let genesis_epoch = spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch()); + if finalized_checkpoint.epoch == genesis_epoch { + finalized_checkpoint.root = Hash256::zero(); + } + + StatusMessage { + fork_digest, + finalized_root: finalized_checkpoint.root, + finalized_epoch: finalized_checkpoint.epoch, + head_root: cached_head.head_block_root(), + head_slot: cached_head.head_slot(), } } diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 2b0fe6f55a..ecca3c9682 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -3,19 +3,20 @@ //! determines whether attestations should be aggregated and/or passed to the beacon node. 
use super::SubnetServiceMessage; -use std::collections::{HashMap, HashSet, VecDeque}; +#[cfg(test)] +use std::collections::HashSet; +use std::collections::{HashMap, VecDeque}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use std::time::{Duration, Instant}; - -use futures::prelude::*; -use rand::seq::SliceRandom; -use slog::{debug, error, o, trace, warn}; +use std::time::Duration; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use hashset_delay::HashSetDelay; +use delay_map::{HashMapDelay, HashSetDelay}; +use futures::prelude::*; use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; +use rand::seq::SliceRandom; +use slog::{debug, error, o, trace, warn}; use slot_clock::SlotClock; use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription}; @@ -24,20 +25,29 @@ use crate::metrics; /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the /// slot is less than this number, skip the peer discovery process. /// Subnet discovery query takes at most 30 secs, 2 slots take 24s. -const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; -/// The time (in slots) before a last seen validator is considered absent and we unsubscribe from the random -/// gossip topics that we subscribed to due to the validator connection. -const LAST_SEEN_VALIDATOR_TIMEOUT: u32 = 150; +pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2; +/// The time (in slots) before a last seen validator is considered absent and we unsubscribe from +/// the random gossip topics that we subscribed to due to the validator connection. +const LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS: u32 = 150; /// The fraction of a slot that we subscribe to a subnet before the required slot. /// -/// Note: The time is calculated as `time = seconds_per_slot / ADVANCE_SUBSCRIPTION_TIME`. -const ADVANCE_SUBSCRIBE_TIME: u32 = 3; -/// The default number of slots before items in hash delay sets used by this class should expire. 
-/// 36s at 12s slot time -const DEFAULT_EXPIRATION_TIMEOUT: u32 = 3; +/// Currently a whole slot ahead. +const ADVANCE_SUBSCRIBE_SLOT_FRACTION: u32 = 1; + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] +pub(crate) enum SubscriptionKind { + /// Long lived subscriptions. + /// + /// These have a longer duration and are advertised in our ENR. + LongLived, + /// Short lived subscriptions. + /// + /// Subscribing to these subnets has a short duration and we don't advertise it in our ENR. + ShortLived, +} /// A particular subnet at a given slot. -#[derive(PartialEq, Eq, Hash, Clone, Debug)] +#[derive(PartialEq, Eq, Hash, Clone, Debug, Copy)] pub struct ExactSubnet { /// The `SubnetId` associated with this subnet. pub subnet_id: SubnetId, @@ -52,17 +62,22 @@ pub struct AttestationService { /// A reference to the beacon chain to process received attestations. pub(crate) beacon_chain: Arc>, - /// The collection of currently subscribed random subnets mapped to their expiry deadline. - pub(crate) random_subnets: HashSetDelay, + /// Subnets we are currently subscribed to as short lived subscriptions. + /// + /// Once they expire, we unsubscribe from these. + short_lived_subscriptions: HashMapDelay, - /// The collection of all currently subscribed subnets (long-lived **and** short-lived). - subscriptions: HashSet, + /// Subnets we are currently subscribed to as long lived subscriptions. + /// + /// We advertise these in our ENR. When these expire, the subnet is removed from our ENR. + long_lived_subscriptions: HashMapDelay, - /// A collection of timeouts for when to unsubscribe from a shard subnet. - unsubscriptions: HashSetDelay, + /// Short lived subscriptions that need to be done in the future. + scheduled_short_lived_subscriptions: HashSetDelay, - /// A collection timeouts to track the existence of aggregate validator subscriptions at an `ExactSubnet`. 
- aggregate_validators_on_subnet: HashSetDelay, + /// A collection timeouts to track the existence of aggregate validator subscriptions at an + /// `ExactSubnet`. + aggregate_validators_on_subnet: Option>, /// A collection of seen validators. These dictate how many random subnets we should be /// subscribed to. As these time out, we unsubscribe for the required random subnets and update @@ -79,8 +94,8 @@ pub struct AttestationService { /// We are always subscribed to all subnets. subscribe_all_subnets: bool, - /// We process and aggregate all attestations on subscribed subnets. - import_all_attestations: bool, + /// For how many slots we subscribe to long lived subnets. + long_lived_subnet_subscription_slots: u64, /// The logger for the attestation service. log: slog::Logger, @@ -96,34 +111,36 @@ impl AttestationService { ) -> Self { let log = log.new(o!("service" => "attestation_service")); - // calculate the random subnet duration from the spec constants + // Calculate the random subnet duration from the spec constants. let spec = &beacon_chain.spec; let slot_duration = beacon_chain.slot_clock.slot_duration(); - let random_subnet_duration_millis = spec + let long_lived_subnet_subscription_slots = spec .epochs_per_random_subnet_subscription - .saturating_mul(T::EthSpec::slots_per_epoch()) - .saturating_mul(slot_duration.as_millis() as u64); + .saturating_mul(T::EthSpec::slots_per_epoch()); + let long_lived_subscription_duration = Duration::from_millis( + slot_duration.as_millis() as u64 * long_lived_subnet_subscription_slots, + ); - // Panics on overflow. Ensure LAST_SEEN_VALIDATOR_TIMEOUT is not too large. + // Panics on overflow. Ensure LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS is not too large. 
let last_seen_val_timeout = slot_duration - .checked_mul(LAST_SEEN_VALIDATOR_TIMEOUT) + .checked_mul(LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS) .expect("LAST_SEEN_VALIDATOR_TIMEOUT must not be ridiculously large"); - let default_timeout = slot_duration - .checked_mul(DEFAULT_EXPIRATION_TIMEOUT) - .expect("DEFAULT_EXPIRATION_TIMEOUT must not be ridiculoustly large"); + let track_validators = !config.import_all_attestations; + let aggregate_validators_on_subnet = + track_validators.then(|| HashSetDelay::new(slot_duration)); AttestationService { events: VecDeque::with_capacity(10), beacon_chain, - random_subnets: HashSetDelay::new(Duration::from_millis(random_subnet_duration_millis)), - subscriptions: HashSet::new(), - unsubscriptions: HashSetDelay::new(default_timeout), - aggregate_validators_on_subnet: HashSetDelay::new(default_timeout), + short_lived_subscriptions: HashMapDelay::new(slot_duration), + long_lived_subscriptions: HashMapDelay::new(long_lived_subscription_duration), + scheduled_short_lived_subscriptions: HashSetDelay::default(), + aggregate_validators_on_subnet, known_validators: HashSetDelay::new(last_seen_val_timeout), waker: None, - subscribe_all_subnets: config.subscribe_all_subnets, - import_all_attestations: config.import_all_attestations, discovery_disabled: config.disable_discovery, + subscribe_all_subnets: config.subscribe_all_subnets, + long_lived_subnet_subscription_slots, log, } } @@ -134,10 +151,25 @@ impl AttestationService { if self.subscribe_all_subnets { self.beacon_chain.spec.attestation_subnet_count as usize } else { - self.subscriptions.len() + self.short_lived_subscriptions + .keys() + .chain(self.long_lived_subscriptions.keys()) + .collect::>() + .len() } } + /// Give access to the current subscriptions for testing purposes. 
+ #[cfg(test)] + pub(crate) fn subscriptions( + &self, + subscription_kind: SubscriptionKind, + ) -> &HashMapDelay { + match subscription_kind { + SubscriptionKind::LongLived => &self.long_lived_subscriptions, + SubscriptionKind::ShortLived => &self.short_lived_subscriptions, + } + } /// Processes a list of validator subscriptions. /// /// This will: @@ -158,7 +190,6 @@ impl AttestationService { let mut subnets_to_discover: HashMap = HashMap::new(); for subscription in subscriptions { metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_REQUESTS); - //NOTE: We assume all subscriptions have been verified before reaching this service // Registers the validator with the attestation service. // This will subscribe to long-lived random subnets if required. @@ -205,8 +236,7 @@ impl AttestationService { if subscription.is_aggregator { metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS); - // set the subscription timer to subscribe to the next subnet if required - if let Err(e) = self.subscribe_to_subnet(exact_subnet.clone()) { + if let Err(e) = self.subscribe_to_subnet(exact_subnet) { warn!(self.log, "Subscription to subnet error"; "error" => e, @@ -234,10 +264,6 @@ impl AttestationService { }; } - // pre-emptively wake the thread to check for new events - if let Some(waker) = &self.waker { - waker.wake_by_ref(); - } Ok(()) } @@ -248,19 +274,27 @@ impl AttestationService { subnet: SubnetId, attestation: &Attestation, ) -> bool { - if self.import_all_attestations { - return true; - } - - let exact_subnet = ExactSubnet { - subnet_id: subnet, - slot: attestation.data.slot, - }; - self.aggregate_validators_on_subnet.contains(&exact_subnet) + self.aggregate_validators_on_subnet + .as_ref() + .map(|tracked_vals| { + tracked_vals.contains_key(&ExactSubnet { + subnet_id: subnet, + slot: attestation.data.slot, + }) + }) + .unwrap_or(true) } /* Internal private functions */ + /// Adds an event to the event queue and notifies that this service is ready to be polled 
+ /// again. + fn queue_event(&mut self, ev: SubnetServiceMessage) { + self.events.push_back(ev); + if let Some(waker) = &self.waker { + waker.wake_by_ref() + } + } /// Checks if there are currently queued discovery requests and the time required to make the /// request. /// @@ -277,12 +311,13 @@ impl AttestationService { let discovery_subnets: Vec = exact_subnets .filter_map(|exact_subnet| { - // check if there is enough time to perform a discovery lookup + // Check if there is enough time to perform a discovery lookup. if exact_subnet.slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) { - // if the slot is more than epoch away, add an event to start looking for peers - // add one slot to ensure we keep the peer for the subscription slot + // Send out an event to start looking for peers. + // Require the peer for an additional slot to ensure we keep the peer for the + // duration of the subscription. let min_ttl = self .beacon_chain .slot_clock @@ -305,244 +340,279 @@ impl AttestationService { .collect(); if !discovery_subnets.is_empty() { - self.events - .push_back(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); + self.queue_event(SubnetServiceMessage::DiscoverPeers(discovery_subnets)); } Ok(()) } - /// Checks the current random subnets and subscriptions to determine if a new subscription for this - /// subnet is required for the given slot. - /// - /// If required, adds a subscription event and an associated unsubscription event. - fn subscribe_to_subnet(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { - // initialise timing variables - let current_slot = self - .beacon_chain - .slot_clock - .now() - .ok_or("Could not get the current slot")?; + // Subscribes to the subnet if it should be done immediately, or schedules it if required. 
+ fn subscribe_to_subnet( + &mut self, + ExactSubnet { subnet_id, slot }: ExactSubnet, + ) -> Result<(), &'static str> { + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - // Calculate the duration to the unsubscription event. - // There are two main cases. Attempting to subscribe to the current slot and all others. - let expected_end_subscription_duration = if current_slot >= exact_subnet.slot { - self.beacon_chain + // Calculate how long before we need to subscribe to the subnet. + let time_to_subscription_start = { + // The short time we schedule the subscription before it's actually required. This + // ensures we are subscribed on time, and allows consecutive subscriptions to the same + // subnet to overlap, reducing subnet churn. + let advance_subscription_duration = slot_duration / ADVANCE_SUBSCRIBE_SLOT_FRACTION; + // The time to the required slot. + let time_to_subscription_slot = self + .beacon_chain .slot_clock - .duration_to_next_slot() - .ok_or("Unable to determine duration to next slot")? - } else { - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - - // the duration until we no longer need this subscription. We assume a single slot is - // sufficient. - self.beacon_chain - .slot_clock - .duration_to_slot(exact_subnet.slot) - .ok_or("Unable to determine duration to subscription slot")? - + slot_duration + .duration_to_slot(slot) + .unwrap_or_default(); // If this is a past slot we will just get a 0 duration. 
+ time_to_subscription_slot.saturating_sub(advance_subscription_duration) }; - // Regardless of whether or not we have already subscribed to a subnet, track the expiration - // of aggregate validator subscriptions to exact subnets so we know whether or not to drop - // attestations for a given subnet + slot - self.aggregate_validators_on_subnet - .insert_at(exact_subnet.clone(), expected_end_subscription_duration); - - // Checks on current subscriptions - // Note: We may be connected to a long-lived random subnet. In this case we still add the - // subscription timeout and check this case when the timeout fires. This is because a - // long-lived random subnet can be unsubscribed at any time when a validator becomes - // in-active. This case is checked on the subscription event (see `handle_subscriptions`). - - // Return if we already have a subscription for this subnet_id and slot - if self.unsubscriptions.contains(&exact_subnet) || self.subscribe_all_subnets { - return Ok(()); + if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { + tracked_vals.insert(ExactSubnet { subnet_id, slot }); } - // We are not currently subscribed and have no waiting subscription, create one - self.handle_subscriptions(exact_subnet.clone()); + // If the subscription should be done in the future, schedule it. Otherwise subscribe + // immediately. + if time_to_subscription_start.is_zero() { + // This is a current or past slot, we subscribe immediately. + self.subscribe_to_subnet_immediately( + subnet_id, + SubscriptionKind::ShortLived, + slot + 1, + )?; + } else { + // This is a future slot, schedule subscribing. 
+ trace!(self.log, "Scheduling subnet subscription"; "subnet" => ?subnet_id, "time_to_subscription_start" => ?time_to_subscription_start); + self.scheduled_short_lived_subscriptions + .insert_at(ExactSubnet { subnet_id, slot }, time_to_subscription_start); + } - // if there is an unsubscription event for the slot prior, we remove it to prevent - // unsubscriptions immediately after the subscription. We also want to minimize - // subscription churn and maintain a consecutive subnet subscriptions. - self.unsubscriptions.retain(|subnet| { - !(subnet.subnet_id == exact_subnet.subnet_id && subnet.slot <= exact_subnet.slot) - }); - // add an unsubscription event to remove ourselves from the subnet once completed - self.unsubscriptions - .insert_at(exact_subnet, expected_end_subscription_duration); Ok(()) } - /// Updates the `known_validators` mapping and subscribes to a set of random subnets if required. - /// - /// This also updates the ENR to indicate our long-lived subscription to the subnet + /// Updates the `known_validators` mapping and subscribes to long lived subnets if required. fn add_known_validator(&mut self, validator_index: u64) { - if self.known_validators.get(&validator_index).is_none() && !self.subscribe_all_subnets { - // New validator has subscribed - // Subscribe to random topics and update the ENR if needed. - - let spec = &self.beacon_chain.spec; - - if self.random_subnets.len() < spec.attestation_subnet_count as usize { - // Still room for subscriptions - self.subscribe_to_random_subnets( - self.beacon_chain.spec.random_subnets_per_validator as usize, - ); - } - } - // add the new validator or update the current timeout for a known validator + let previously_known = self.known_validators.contains_key(&validator_index); + // Add the new validator or update the current timeout for a known validator. self.known_validators.insert(validator_index); + if !previously_known { + // New validator has subscribed. 
+ // Subscribe to random topics and update the ENR if needed. + self.subscribe_to_random_subnets(); + } } /// Subscribe to long-lived random subnets and update the local ENR bitfield. - fn subscribe_to_random_subnets(&mut self, no_subnets_to_subscribe: usize) { - let subnet_count = self.beacon_chain.spec.attestation_subnet_count; + /// The number of subnets to subscribe depends on the number of active validators and number of + /// current subscriptions. + fn subscribe_to_random_subnets(&mut self) { + if self.subscribe_all_subnets { + // This case is not handled by this service. + return; + } - // Build a list of random subnets that we are not currently subscribed to. - let available_subnets = (0..subnet_count) + let max_subnets = self.beacon_chain.spec.attestation_subnet_count; + // Calculate how many subnets we need, + let required_long_lived_subnets = { + let subnets_for_validators = self + .known_validators + .len() + .saturating_mul(self.beacon_chain.spec.random_subnets_per_validator as usize); + subnets_for_validators // How many subnets we need + .min(max_subnets as usize) // Capped by the max + .saturating_sub(self.long_lived_subscriptions.len()) // Minus those we have + }; + + if required_long_lived_subnets == 0 { + // Nothing to do. + return; + } + + // Build a list of the subnets that we are not currently advertising. 
+ let available_subnets = (0..max_subnets) .map(SubnetId::new) - .filter(|subnet_id| self.random_subnets.get(subnet_id).is_none()) + .filter(|subnet_id| !self.long_lived_subscriptions.contains_key(subnet_id)) .collect::>(); - let to_subscribe_subnets = { - if available_subnets.len() < no_subnets_to_subscribe { - debug!(self.log, "Reached maximum random subnet subscriptions"); - available_subnets - } else { - // select a random sample of available subnets - available_subnets - .choose_multiple(&mut rand::thread_rng(), no_subnets_to_subscribe) - .cloned() - .collect::>() + let subnets_to_subscribe: Vec<_> = available_subnets + .choose_multiple(&mut rand::thread_rng(), required_long_lived_subnets) + .cloned() + .collect(); + + // Calculate in which slot does this subscription end. + let end_slot = match self.beacon_chain.slot_clock.now() { + Some(slot) => slot + self.long_lived_subnet_subscription_slots, + None => { + return debug!( + self.log, + "Failed to calculate end slot of long lived subnet subscriptions." + ) } }; - for subnet_id in to_subscribe_subnets { - // remove this subnet from any immediate un-subscription events - self.unsubscriptions - .retain(|exact_subnet| exact_subnet.subnet_id != subnet_id); - - // insert a new random subnet - self.random_subnets.insert(subnet_id); - - // send discovery request - // Note: it's wasteful to send a DiscoverPeers request if we already have peers for this subnet. - // However, subscribing to random subnets ideally shouldn't happen very often (once in ~27 hours) and - // this makes it easier to deterministically test the attestations service. 
- self.events - .push_back(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery { - subnet: Subnet::Attestation(subnet_id), - min_ttl: None, - }])); - - // if we are not already subscribed, then subscribe - if !self.subscriptions.contains(&subnet_id) { - self.subscriptions.insert(subnet_id); - debug!(self.log, "Subscribing to random subnet"; "subnet_id" => ?subnet_id); - self.events - .push_back(SubnetServiceMessage::Subscribe(Subnet::Attestation( - subnet_id, - ))); + for subnet_id in &subnets_to_subscribe { + if let Err(e) = self.subscribe_to_subnet_immediately( + *subnet_id, + SubscriptionKind::LongLived, + end_slot, + ) { + debug!(self.log, "Failed to subscribe to long lived subnet"; "subnet" => ?subnet_id, "err" => e); } - - // add the subnet to the ENR bitfield - self.events - .push_back(SubnetServiceMessage::EnrAdd(Subnet::Attestation(subnet_id))); } } /* A collection of functions that handle the various timeouts */ - /// A queued subscription is ready. + /// Registers a subnet as subscribed. /// - /// We add subscriptions events even if we are already subscribed to a random subnet (as these - /// can be unsubscribed at any time by inactive validators). If we are - /// still subscribed at the time the event fires, we don't re-subscribe. 
- fn handle_subscriptions(&mut self, exact_subnet: ExactSubnet) { - // Check if the subnet currently exists as a long-lasting random subnet - if let Some(expiry) = self.random_subnets.get(&exact_subnet.subnet_id) { - // we are subscribed via a random subnet, if this is to expire during the time we need - // to be subscribed, just extend the expiry - let slot_duration = self.beacon_chain.slot_clock.slot_duration(); - let advance_subscription_duration = slot_duration - .checked_div(ADVANCE_SUBSCRIBE_TIME) - .expect("ADVANCE_SUBSCRIPTION_TIME cannot be too large"); - // we require the subnet subscription for at least a slot on top of the initial - // subscription time - let expected_end_subscription_duration = slot_duration + advance_subscription_duration; + /// Checks that the time in which the subscription would end is not in the past. If we are + /// already subscribed, extends the timeout if necessary. If this is a new subscription, we send + /// out the appropriate events. + fn subscribe_to_subnet_immediately( + &mut self, + subnet_id: SubnetId, + subscription_kind: SubscriptionKind, + end_slot: Slot, + ) -> Result<(), &'static str> { + if self.subscribe_all_subnets { + // Case not handled by this service. + return Ok(()); + } - if expiry < &(Instant::now() + expected_end_subscription_duration) { - self.random_subnets - .update_timeout(&exact_subnet.subnet_id, expected_end_subscription_duration); + let time_to_subscription_end = self + .beacon_chain + .slot_clock + .duration_to_slot(end_slot) + .unwrap_or_default(); + + // First check this is worth doing. + if time_to_subscription_end.is_zero() { + return Err("Time when subscription would end has already passed."); + } + + // We need to check and add a subscription for the right kind, regardless of the presence + // of the subnet as a subscription of the other kind. This is mainly since long lived + // subscriptions can be removed at any time when a validator goes offline. 
+ let (subscriptions, already_subscribed_as_other_kind) = match subscription_kind { + SubscriptionKind::ShortLived => ( + &mut self.short_lived_subscriptions, + self.long_lived_subscriptions.contains_key(&subnet_id), + ), + SubscriptionKind::LongLived => ( + &mut self.long_lived_subscriptions, + self.short_lived_subscriptions.contains_key(&subnet_id), + ), + }; + + match subscriptions.get(&subnet_id) { + Some(current_end_slot) => { + // We are already subscribed. Check if we need to extend the subscription. + if &end_slot > current_end_slot { + trace!(self.log, "Extending subscription to subnet"; + "subnet" => ?subnet_id, + "prev_end_slot" => current_end_slot, + "new_end_slot" => end_slot, + "subscription_kind" => ?subscription_kind, + ); + subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); + } } - } else { - // we are also not un-subscribing from a subnet if the next slot requires us to be - // subscribed. Therefore there could be the case that we are already still subscribed - // to the required subnet. In which case we do not issue another subscription request. - if !self.subscriptions.contains(&exact_subnet.subnet_id) { - // we are not already subscribed - debug!(self.log, "Subscribing to subnet"; "subnet" => *exact_subnet.subnet_id, "target_slot" => exact_subnet.slot.as_u64()); - self.subscriptions.insert(exact_subnet.subnet_id); - self.events - .push_back(SubnetServiceMessage::Subscribe(Subnet::Attestation( - exact_subnet.subnet_id, + None => { + // This is a new subscription. Add with the corresponding timeout and send the + // notification. + subscriptions.insert_at(subnet_id, end_slot, time_to_subscription_end); + + // Inform of the subscription. 
+ if !already_subscribed_as_other_kind { + debug!(self.log, "Subscribing to subnet"; + "subnet" => ?subnet_id, + "end_slot" => end_slot, + "subscription_kind" => ?subscription_kind, + ); + self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation( + subnet_id, ))); + } + + // If this is a new long lived subscription, send out the appropriate events. + if SubscriptionKind::LongLived == subscription_kind { + let subnet = Subnet::Attestation(subnet_id); + // Advertise this subnet in our ENR. + self.long_lived_subscriptions.insert_at( + subnet_id, + end_slot, + time_to_subscription_end, + ); + self.queue_event(SubnetServiceMessage::EnrAdd(subnet)); + + if !self.discovery_disabled { + self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![ + SubnetDiscovery { + subnet, + min_ttl: None, + }, + ])) + } + } } } - } - /// A queued unsubscription is ready. - /// - /// Unsubscription events are added, even if we are subscribed to long-lived random subnets. If - /// a random subnet is present, we do not unsubscribe from it. - fn handle_unsubscriptions(&mut self, exact_subnet: ExactSubnet) { - // Check if the subnet currently exists as a long-lasting random subnet - if self.random_subnets.contains(&exact_subnet.subnet_id) { - return; - } - - debug!(self.log, "Unsubscribing from subnet"; "subnet" => *exact_subnet.subnet_id, "processed_slot" => exact_subnet.slot.as_u64()); - - self.subscriptions.remove(&exact_subnet.subnet_id); - self.events - .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - exact_subnet.subnet_id, - ))); + Ok(()) } /// A random subnet has expired. /// /// This function selects a new subnet to join, or extends the expiry if there are no more /// available subnets to choose from. 
- fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId) { + fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId, end_slot: Slot) { let subnet_count = self.beacon_chain.spec.attestation_subnet_count; - if self.random_subnets.len() == (subnet_count - 1) as usize { - // We are at capacity, simply increase the timeout of the current subnet - self.random_subnets.insert(subnet_id); - return; - } - // If there are no unsubscription events for `subnet_id`, we unsubscribe immediately. - if !self - .unsubscriptions - .keys() - .any(|s| s.subnet_id == subnet_id) - { - // we are not at capacity, unsubscribe from the current subnet. - debug!(self.log, "Unsubscribing from random subnet"; "subnet_id" => *subnet_id); - self.events - .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( + if self.long_lived_subscriptions.len() == (subnet_count - 1) as usize { + let end_slot = end_slot + self.long_lived_subnet_subscription_slots; + // This is just an extra accuracy precaution, we could use the default timeout if + // needed. + if let Some(time_to_subscription_end) = + self.beacon_chain.slot_clock.duration_to_slot(end_slot) + { + // We are at capacity, simply increase the timeout of the current subnet. + self.long_lived_subscriptions.insert_at( subnet_id, - ))); + end_slot + 1, + time_to_subscription_end, + ); + } else { + self.long_lived_subscriptions.insert(subnet_id, end_slot); + } + return; } // Remove the ENR bitfield bit and choose a new random on from the available subnets - self.events - .push_back(SubnetServiceMessage::EnrRemove(Subnet::Attestation( + // Subscribe to a new random subnet. + self.subscribe_to_random_subnets(); + } + + // Unsubscribes from a subnet that was removed if it does not continue to exist as a + // subscription of the other kind. For long lived subscriptions, it also removes the + // advertisement from our ENR. 
+ fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) { + let other_subscriptions = match subscription_kind { + SubscriptionKind::LongLived => &self.short_lived_subscriptions, + SubscriptionKind::ShortLived => &self.long_lived_subscriptions, + }; + + if !other_subscriptions.contains_key(&subnet_id) { + // Subscription no longer exists as short lived or long lived. + debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet_id, "subscription_kind" => ?subscription_kind); + self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( subnet_id, ))); - // Subscribe to a new random subnet - self.subscribe_to_random_subnets(1); + } + + if subscription_kind == SubscriptionKind::LongLived { + // Remove from our ENR even if we remain subscribed in other way. + self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation( + subnet_id, + ))); + } } /// A known validator has not sent a subscription in a while. They are considered offline and the @@ -552,39 +622,37 @@ impl AttestationService { /// validators to random subnets. So when a validator goes offline, we can simply remove the /// allocated amount of random subnets. fn handle_known_validator_expiry(&mut self) { - let spec = &self.beacon_chain.spec; - let subnet_count = spec.attestation_subnet_count; - let random_subnets_per_validator = spec.random_subnets_per_validator; - if self.known_validators.len() as u64 * random_subnets_per_validator >= subnet_count { - // have too many validators, ignore + // Calculate how many subnets should we remove. 
+ let extra_subnet_count = { + let max_subnets = self.beacon_chain.spec.attestation_subnet_count; + let subnets_for_validators = self + .known_validators + .len() + .saturating_mul(self.beacon_chain.spec.random_subnets_per_validator as usize) + .min(max_subnets as usize); + + self.long_lived_subscriptions + .len() + .saturating_sub(subnets_for_validators) + }; + + if extra_subnet_count == 0 { + // Nothing to do return; } - let subscribed_subnets = self.random_subnets.keys().cloned().collect::>(); - let to_remove_subnets = subscribed_subnets.choose_multiple( - &mut rand::thread_rng(), - random_subnets_per_validator as usize, - ); + let advertised_subnets = self + .long_lived_subscriptions + .keys() + .cloned() + .collect::>(); + let to_remove_subnets = advertised_subnets + .choose_multiple(&mut rand::thread_rng(), extra_subnet_count) + .cloned(); for subnet_id in to_remove_subnets { - // If there are no unsubscription events for `subnet_id`, we unsubscribe immediately. - if !self - .unsubscriptions - .keys() - .any(|s| s.subnet_id == *subnet_id) - { - self.events - .push_back(SubnetServiceMessage::Unsubscribe(Subnet::Attestation( - *subnet_id, - ))); - } - // as the long lasting subnet subscription is being removed, remove the subnet_id from - // the ENR bitfield - self.events - .push_back(SubnetServiceMessage::EnrRemove(Subnet::Attestation( - *subnet_id, - ))); - self.random_subnets.remove(subnet_id); + self.long_lived_subscriptions.remove(&subnet_id); + self.handle_removed_subnet(subnet_id, SubscriptionKind::LongLived); } } } @@ -593,7 +661,7 @@ impl Stream for AttestationService { type Item = SubnetServiceMessage; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // update the waker if needed + // Update the waker if needed. 
if let Some(waker) = &self.waker { if waker.will_wake(cx.waker()) { self.waker = Some(cx.waker().clone()); @@ -602,42 +670,68 @@ impl Stream for AttestationService { self.waker = Some(cx.waker().clone()); } - // process any un-subscription events - match self.unsubscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(exact_subnet))) => self.handle_unsubscriptions(exact_subnet), + // Send out any generated events. + if let Some(event) = self.events.pop_front() { + return Poll::Ready(Some(event)); + } + + // Process first any known validator expiries, since these affect how many long lived + // subnets we need. + match self.known_validators.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(_validator_index))) => { + self.handle_known_validator_expiry(); + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Process scheduled subscriptions that might be ready, since those can extend a soon to + // expire subscription. + match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => { + if let Err(e) = self.subscribe_to_subnet_immediately( + subnet_id, + SubscriptionKind::ShortLived, + slot + 1, + ) { + debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet_id, "err" => e); + } + } + Poll::Ready(Some(Err(e))) => { + error!(self.log, "Failed to check for scheduled subnet subscriptions"; "error"=> e); + } + Poll::Ready(None) | Poll::Pending => {} + } + + // Finally process any expired subscriptions. 
+ match self.short_lived_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => { + self.handle_removed_subnet(subnet_id, SubscriptionKind::ShortLived); + } Poll::Ready(Some(Err(e))) => { error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> e); } Poll::Ready(None) | Poll::Pending => {} } - // process any random subnet expiries - match self.random_subnets.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(subnet))) => self.handle_random_subnet_expiry(subnet), - Poll::Ready(Some(Err(e))) => { - error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); - } - Poll::Ready(None) | Poll::Pending => {} - } - - // process any known validator expiries - match self.known_validators.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(_validator_index))) => { - let _ = self.handle_known_validator_expiry(); + // Process any random subnet expiries. + match self.long_lived_subscriptions.poll_next_unpin(cx) { + Poll::Ready(Some(Ok((subnet_id, end_slot)))) => { + self.handle_random_subnet_expiry(subnet_id, end_slot) } Poll::Ready(Some(Err(e))) => { error!(self.log, "Failed to check for random subnet cycles"; "error"=> e); } Poll::Ready(None) | Poll::Pending => {} } - // poll to remove entries on expiration, no need to act on expiration events - if let Poll::Ready(Some(Err(e))) = self.aggregate_validators_on_subnet.poll_next_unpin(cx) { - error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e); - } - // process any generated events - if let Some(event) = self.events.pop_front() { - return Poll::Ready(Some(event)); + // Poll to remove entries on expiration, no need to act on expiration events. 
+ if let Some(tracked_vals) = self.aggregate_validators_on_subnet.as_mut() { + if let Poll::Ready(Some(Err(e))) = tracked_vals.poll_next_unpin(cx) { + error!(self.log, "Failed to check for aggregate validator on subnet expirations"; "error"=> e); + } } Poll::Pending diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs index 9e92f62250..0b27ff527f 100644 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ b/beacon_node/network/src/subnet_service/sync_subnets.rs @@ -12,7 +12,7 @@ use slog::{debug, error, o, trace, warn}; use super::SubnetServiceMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use hashset_delay::HashSetDelay; +use delay_map::HashSetDelay; use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery}; use slot_clock::SlotClock; use types::{Epoch, EthSpec, SyncCommitteeSubscription, SyncSubnetId}; diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 581f6b3270..65ca9f2194 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -8,13 +8,14 @@ use futures::prelude::*; use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lazy_static::lazy_static; use lighthouse_network::NetworkConfig; -use slog::Logger; +use slog::{o, Drain, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::sync::Arc; use std::time::{Duration, SystemTime}; use store::config::StoreConfig; use store::{HotColdDB, MemoryStore}; +use task_executor::test_utils::TestRuntime; use types::{ CommitteeIndex, Epoch, EthSpec, Hash256, MainnetEthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, @@ -32,6 +33,7 @@ type TestBeaconChainType = Witness< pub struct TestBeaconChain { chain: Arc>, + _test_runtime: TestRuntime, } 
impl TestBeaconChain { @@ -40,17 +42,20 @@ impl TestBeaconChain { let keypairs = generate_deterministic_keypairs(1); - let log = get_logger(); + let log = get_logger(None); let store = HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let test_runtime = TestRuntime::default(); + let chain = Arc::new( BeaconChainBuilder::new(MainnetEthSpec) .logger(log.clone()) .custom_spec(spec.clone()) .store(Arc::new(store)) + .task_executor(test_runtime.task_executor.clone()) .genesis_state( interop_genesis_state::( &keypairs, @@ -74,7 +79,10 @@ impl TestBeaconChain { .build() .expect("should build"), ); - Self { chain } + Self { + chain, + _test_runtime: test_runtime, + } } } @@ -85,16 +93,32 @@ pub fn recent_genesis_time() -> u64 { .as_secs() } -fn get_logger() -> Logger { - NullLoggerBuilder.build().expect("logger should build") +fn get_logger(log_level: Option) -> Logger { + if let Some(level) = log_level { + let drain = { + let decorator = slog_term::TermDecorator::new().build(); + let decorator = + logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).chan_size(2048).build(); + drain.filter_level(level) + }; + + Logger::root(drain.fuse(), o!()) + } else { + let builder = NullLoggerBuilder; + builder.build().expect("should build logger") + } } lazy_static! 
{ static ref CHAIN: TestBeaconChain = TestBeaconChain::new_with_system_clock(); } -fn get_attestation_service() -> AttestationService { - let log = get_logger(); +fn get_attestation_service( + log_level: Option, +) -> AttestationService { + let log = get_logger(log_level); let config = NetworkConfig::default(); let beacon_chain = CHAIN.chain.clone(); @@ -103,7 +127,7 @@ fn get_attestation_service() -> AttestationService { } fn get_sync_committee_service() -> SyncCommitteeService { - let log = get_logger(); + let log = get_logger(None); let config = NetworkConfig::default(); let beacon_chain = CHAIN.chain.clone(); @@ -120,28 +144,34 @@ async fn get_events + Unpin>( ) -> Vec { let mut events = Vec::new(); - let collect_stream_fut = async { - loop { - if let Some(result) = stream.next().await { - events.push(result); + let timeout = + tokio::time::sleep(Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout); + futures::pin_mut!(timeout); + + loop { + tokio::select! { + Some(event) = stream.next() => { + events.push(event); if let Some(num) = num_events { if events.len() == num { - return; + break; } } } - } - }; + _ = timeout.as_mut() => { + break; + } - tokio::select! 
{ - _ = collect_stream_fut => events, - _ = tokio::time::sleep( - Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout, - ) => events + } } + + events } mod attestation_service { + + use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD; + use super::*; fn get_subscription( @@ -187,7 +217,7 @@ mod attestation_service { let committee_count = 1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); + let mut attestation_service = get_attestation_service(None); let current_slot = attestation_service .beacon_chain .slot_clock @@ -229,15 +259,18 @@ mod attestation_service { matches::assert_matches!( events[..3], [ - SubnetServiceMessage::DiscoverPeers(_), SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3) + SubnetServiceMessage::EnrAdd(_any3), + SubnetServiceMessage::DiscoverPeers(_), ] ); // If the long lived and short lived subnets are the same, there should be no more events // as we don't resubscribe already subscribed subnets. - if !attestation_service.random_subnets.contains(&subnet_id) { + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id) + { assert_eq!(expected[..], events[3..]); } // Should be subscribed to only 1 long lived subnet after unsubscription. 
@@ -259,7 +292,7 @@ mod attestation_service { let com2 = 0; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); + let mut attestation_service = get_attestation_service(None); let current_slot = attestation_service .beacon_chain .slot_clock @@ -311,16 +344,19 @@ mod attestation_service { matches::assert_matches!( events[..3], [ - SubnetServiceMessage::DiscoverPeers(_), SubnetServiceMessage::Subscribe(_any1), - SubnetServiceMessage::EnrAdd(_any3) + SubnetServiceMessage::EnrAdd(_any3), + SubnetServiceMessage::DiscoverPeers(_), ] ); let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); // Should be still subscribed to 1 long lived and 1 short lived subnet if both are different. - if !attestation_service.random_subnets.contains(&subnet_id1) { + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id1) + { assert_eq!(expected, events[3]); assert_eq!(attestation_service.subscription_count(), 2); } else { @@ -331,7 +367,10 @@ mod attestation_service { let unsubscribe_event = get_events(&mut attestation_service, None, 1).await; // If the long lived and short lived subnets are different, we should get an unsubscription event. 
- if !attestation_service.random_subnets.contains(&subnet_id1) { + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id1) + { assert_eq!( [SubnetServiceMessage::Unsubscribe(Subnet::Attestation( subnet_id1 @@ -352,7 +391,7 @@ mod attestation_service { let committee_count = 1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); + let mut attestation_service = get_attestation_service(None); let current_slot = attestation_service .beacon_chain .slot_clock @@ -410,7 +449,7 @@ mod attestation_service { let committee_count = 1; // create the attestation service and subscriptions - let mut attestation_service = get_attestation_service(); + let mut attestation_service = get_attestation_service(None); let current_slot = attestation_service .beacon_chain .slot_clock @@ -457,6 +496,122 @@ mod attestation_service { assert_eq!(enr_add_count, 64); assert_eq!(unexpected_msg_count, 0); } + + #[tokio::test] + async fn test_subscribe_same_subnet_several_slots_apart() { + // subscription config + let validator_index = 1; + let committee_count = 1; + + // Makes 2 validator subscriptions to the same subnet but at different slots. + // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). 
+ let subscription_slot1 = 0; + let subscription_slot2 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; + let com1 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; + let com2 = 0; + + // create the attestation service and subscriptions + let mut attestation_service = get_attestation_service(None); + let current_slot = attestation_service + .beacon_chain + .slot_clock + .now() + .expect("Could not get current slot"); + + let sub1 = get_subscription( + validator_index, + com1, + current_slot + Slot::new(subscription_slot1), + committee_count, + ); + + let sub2 = get_subscription( + validator_index, + com2, + current_slot + Slot::new(subscription_slot2), + committee_count, + ); + + let subnet_id1 = SubnetId::compute_subnet::( + current_slot + Slot::new(subscription_slot1), + com1, + committee_count, + &attestation_service.beacon_chain.spec, + ) + .unwrap(); + + let subnet_id2 = SubnetId::compute_subnet::( + current_slot + Slot::new(subscription_slot2), + com2, + committee_count, + &attestation_service.beacon_chain.spec, + ) + .unwrap(); + + // Assert that subscriptions are different but their subnet is the same + assert_ne!(sub1, sub2); + assert_eq!(subnet_id1, subnet_id2); + + // submit the subscriptions + attestation_service + .validator_subscriptions(vec![sub1, sub2]) + .unwrap(); + + // Unsubscription event should happen at the end of the slot. 
+ let events = get_events(&mut attestation_service, None, 1).await; + matches::assert_matches!( + events[..3], + [ + SubnetServiceMessage::Subscribe(_any1), + SubnetServiceMessage::EnrAdd(_any3), + SubnetServiceMessage::DiscoverPeers(_), + ] + ); + + let expected_subscription = + SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); + let expected_unsubscription = + SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); + + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id1) + { + assert_eq!(expected_subscription, events[3]); + // fourth is a discovery event + assert_eq!(expected_unsubscription, events[5]); + } + assert_eq!(attestation_service.subscription_count(), 1); + + println!("{events:?}"); + let subscription_slot = current_slot + subscription_slot2 - 1; // one less do to the + // advance subscription time + let wait_slots = attestation_service + .beacon_chain + .slot_clock + .duration_to_slot(subscription_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; + + let no_events = dbg!(get_events(&mut attestation_service, None, wait_slots as u32).await); + + assert_eq!(no_events, []); + + let second_subscribe_event = get_events(&mut attestation_service, None, 2).await; + // If the long lived and short lived subnets are different, we should get an unsubscription event. + if !attestation_service + .subscriptions(attestation_subnets::SubscriptionKind::LongLived) + .contains_key(&subnet_id1) + { + assert_eq!( + [SubnetServiceMessage::Subscribe(Subnet::Attestation( + subnet_id1 + ))], + second_subscribe_event[..] 
+ ); + } + } } mod sync_committee_service { diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index e76c037dad..d36bbbc79b 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -11,7 +11,9 @@ use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::sync::manager::{BatchProcessResult, Id}; use crate::sync::network_context::SyncNetworkContext; -use crate::sync::range_sync::{BatchConfig, BatchId, BatchInfo, BatchState}; +use crate::sync::range_sync::{ + BatchConfig, BatchId, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, +}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::types::{BackFillState, NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; @@ -22,7 +24,6 @@ use std::collections::{ HashMap, HashSet, }; use std::sync::Arc; -use tokio::sync::mpsc; use types::{Epoch, EthSpec, SignedBeaconBlock}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of @@ -53,7 +54,7 @@ impl BatchConfig for BackFillBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; let mut hasher = DefaultHasher::new(); @@ -142,9 +143,6 @@ pub struct BackFillSync { /// (i.e synced peers). network_globals: Arc>, - /// A multi-threaded, non-blocking processor for processing batches in the beacon chain. - beacon_processor_send: mpsc::Sender>, - /// A logger for backfill sync. log: slog::Logger, } @@ -153,7 +151,6 @@ impl BackFillSync { pub fn new( beacon_chain: Arc>, network_globals: Arc>, - beacon_processor_send: mpsc::Sender>, log: slog::Logger, ) -> Self { // Determine if backfill is enabled or not. 
@@ -191,7 +188,6 @@ impl BackFillSync { participating_peers: HashSet::new(), restart_failed_sync: false, beacon_chain, - beacon_processor_send, log, }; @@ -214,7 +210,7 @@ impl BackFillSync { #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn start( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, ) -> Result { match self.state() { BackFillState::Syncing => {} // already syncing ignore. @@ -310,7 +306,7 @@ impl BackFillSync { pub fn peer_disconnected( &mut self, peer_id: &PeerId, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, ) -> Result<(), BackFillError> { if matches!( self.state(), @@ -324,10 +320,10 @@ impl BackFillSync { for id in batch_ids { if let Some(batch) = self.batches.get_mut(&id) { match batch.download_failed(false) { - Ok(true) => { + Ok(BatchOperationOutcome::Failed { blacklist: _ }) => { self.fail_sync(BackFillError::BatchDownloadFailed(id))?; } - Ok(false) => {} + Ok(BatchOperationOutcome::Continue) => {} Err(e) => { self.fail_sync(BackFillError::BatchInvalidState(id, e.0))?; } @@ -353,7 +349,7 @@ impl BackFillSync { #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn inject_error( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, peer_id: &PeerId, request_id: Id, @@ -371,8 +367,10 @@ impl BackFillSync { } match batch.download_failed(true) { Err(e) => self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)), - Ok(true) => self.fail_sync(BackFillError::BatchDownloadFailed(batch_id)), - Ok(false) => self.retry_batch_download(network, batch_id), + Ok(BatchOperationOutcome::Failed { blacklist: _ }) => { + self.fail_sync(BackFillError::BatchDownloadFailed(batch_id)) + } + Ok(BatchOperationOutcome::Continue) => self.retry_batch_download(network, batch_id), } } else { // this could be an error for an 
old batch, removed when the chain advances @@ -388,11 +386,11 @@ impl BackFillSync { #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn on_block_response( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) -> Result { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -439,7 +437,7 @@ impl BackFillSync { self.process_completed_batches(network) } Err(result) => { - let (expected_boundary, received_boundary, is_failed) = match result { + let (expected_boundary, received_boundary, outcome) = match result { Err(e) => { return self .fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) @@ -450,7 +448,7 @@ impl BackFillSync { warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, "peer_id" => %peer_id, batch); - if is_failed { + if let BatchOperationOutcome::Failed { blacklist: _ } = outcome { error!(self.log, "Backfill failed"; "epoch" => batch_id, "received_boundary" => received_boundary, "expected_boundary" => expected_boundary); return self .fail_sync(BackFillError::BatchDownloadFailed(batch_id)) @@ -501,7 +499,7 @@ impl BackFillSync { /// The batch must exist and be ready for processing fn process_batch( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, ) -> Result { // Only process batches if this chain is Syncing, and only one at a time @@ -537,8 +535,8 @@ impl BackFillSync { let process_id = ChainSegmentProcessId::BackSyncBatchId(batch_id); self.current_processing_batch = Some(batch_id); - if let Err(e) = self - .beacon_processor_send + if let Err(e) = network + .processor_channel() .try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) { crit!(self.log, "Failed to send backfill 
segment to processor."; "msg" => "process_batch", @@ -547,15 +545,7 @@ impl BackFillSync { // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. - self.on_batch_process_result( - network, - batch_id, - &BatchProcessResult::Failed { - imported_blocks: false, - // The beacon processor queue is full, no need to penalize the peer. - peer_action: None, - }, - ) + self.on_batch_process_result(network, batch_id, &BatchProcessResult::NonFaultyFailure) } else { Ok(ProcessResult::Successful) } @@ -567,14 +557,14 @@ impl BackFillSync { #[must_use = "A failure here indicates the backfill sync has failed and the global sync state should be updated"] pub fn on_batch_process_result( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, result: &BatchProcessResult, ) -> Result { // The first two cases are possible in regular sync, should not occur in backfill, but we // keep this logic for handling potential processing race conditions. // result - match &self.current_processing_batch { + let batch = match &self.current_processing_batch { Some(processing_id) if *processing_id != batch_id => { debug!(self.log, "Unexpected batch result"; "batch_epoch" => batch_id, "expected_batch_epoch" => processing_id); @@ -588,13 +578,9 @@ impl BackFillSync { _ => { // batch_id matches, continue self.current_processing_batch = None; - } - } - match result { - BatchProcessResult::Success(was_non_empty) => { - let batch = match self.batches.get_mut(&batch_id) { - Some(v) => v, + match self.batches.get_mut(&batch_id) { + Some(batch) => batch, None => { // This is an error. Fail the sync algorithm. 
return self @@ -604,9 +590,28 @@ impl BackFillSync { ))) .map(|_| ProcessResult::Successful); } - }; + } + } + }; - if let Err(e) = batch.processing_completed(true) { + let peer = match batch.current_peer() { + Some(v) => *v, + None => { + return self + .fail_sync(BackFillError::BatchInvalidState( + batch_id, + String::from("Peer does not exist"), + )) + .map(|_| ProcessResult::Successful) + } + }; + + debug!(self.log, "Backfill batch processed"; "result" => ?result, &batch, + "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); + + match result { + BatchProcessResult::Success { was_non_empty } => { + if let Err(e) = batch.processing_completed(BatchProcessingResult::Success) { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; } // If the processed batch was not empty, we can validate previous unvalidated @@ -635,42 +640,17 @@ impl BackFillSync { self.process_completed_batches(network) } } - BatchProcessResult::Failed { + BatchProcessResult::FaultyFailure { imported_blocks, - peer_action, + penalty, } => { - let batch = match self.batches.get_mut(&batch_id) { - Some(v) => v, - None => { - return self - .fail_sync(BackFillError::InvalidSyncState(format!( - "Batch not found for current processing target {}", - batch_id - ))) - .map(|_| ProcessResult::Successful) - } - }; - - let peer = match batch.current_peer() { - Some(v) => *v, - None => { - return self - .fail_sync(BackFillError::BatchInvalidState( - batch_id, - String::from("Peer does not exist"), - )) - .map(|_| ProcessResult::Successful) - } - }; - debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, - "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); - match batch.processing_completed(false) { + match batch.processing_completed(BatchProcessingResult::FaultyFailure) { Err(e) => { // Batch was in the wrong state self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) .map(|_| 
ProcessResult::Successful) } - Ok(true) => { + Ok(BatchOperationOutcome::Failed { blacklist: _ }) => { // check that we have not exceeded the re-process retry counter // If a batch has exceeded the invalid batch lookup attempts limit, it means // that it is likely all peers are sending invalid batches @@ -679,23 +659,18 @@ impl BackFillSync { warn!( self.log, "Backfill batch failed to download. Penalizing peers"; - "score_adjustment" => %peer_action - .as_ref() - .map(ToString::to_string) - .unwrap_or_else(|| "None".into()), + "score_adjustment" => %penalty, "batch_epoch"=> batch_id ); - if let Some(peer_action) = peer_action { - for peer in self.participating_peers.drain() { - network.report_peer(peer, *peer_action, "backfill_batch_failed"); - } + for peer in self.participating_peers.drain() { + network.report_peer(peer, *penalty, "backfill_batch_failed"); } self.fail_sync(BackFillError::BatchProcessingFailed(batch_id)) .map(|_| ProcessResult::Successful) } - Ok(false) => { + Ok(BatchOperationOutcome::Continue) => { // chain can continue. Check if it can be progressed if *imported_blocks { // At least one block was successfully verified and imported, then we can be sure all @@ -709,13 +684,21 @@ impl BackFillSync { } } } + BatchProcessResult::NonFaultyFailure => { + if let Err(e) = batch.processing_completed(BatchProcessingResult::NonFaultyFailure) + { + self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; + } + self.retry_batch_download(network, batch_id) + .map(|_| ProcessResult::Successful) + } } } /// Processes the next ready batch. 
fn process_completed_batches( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, ) -> Result { // Only process batches if backfill is syncing and only process one batch at a time if self.state() != BackFillState::Syncing || self.current_processing_batch.is_some() { @@ -775,11 +758,7 @@ impl BackFillSync { /// /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. - fn advance_chain( - &mut self, - network: &mut SyncNetworkContext, - validating_epoch: Epoch, - ) { + fn advance_chain(&mut self, network: &mut SyncNetworkContext, validating_epoch: Epoch) { // make sure this epoch produces an advancement if validating_epoch >= self.current_start { return; @@ -874,7 +853,7 @@ impl BackFillSync { /// intended and can result in downvoting a peer. fn handle_invalid_batch( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, ) -> Result<(), BackFillError> { // The current batch could not be processed, indicating either the current or previous @@ -901,11 +880,11 @@ impl BackFillSync { .validation_failed() .map_err(|e| BackFillError::BatchInvalidState(batch_id, e.0))? { - true => { + BatchOperationOutcome::Failed { blacklist: _ } => { // Batch has failed and cannot be redownloaded. return self.fail_sync(BackFillError::BatchProcessingFailed(batch_id)); } - false => { + BatchOperationOutcome::Continue => { redownload_queue.push(*id); } } @@ -925,7 +904,7 @@ impl BackFillSync { /// Sends and registers the request of a batch awaiting download. fn retry_batch_download( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, ) -> Result<(), BackFillError> { let batch = match self.batches.get_mut(&batch_id) { @@ -969,7 +948,7 @@ impl BackFillSync { /// Requests the batch assigned to the given id from a given peer. 
fn send_batch( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, peer: PeerId, ) -> Result<(), BackFillError> { @@ -1006,8 +985,12 @@ impl BackFillSync { Err(e) => { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))? } - Ok(true) => self.fail_sync(BackFillError::BatchDownloadFailed(batch_id))?, - Ok(false) => return self.retry_batch_download(network, batch_id), + Ok(BatchOperationOutcome::Failed { blacklist: _ }) => { + self.fail_sync(BackFillError::BatchDownloadFailed(batch_id))? + } + Ok(BatchOperationOutcome::Continue) => { + return self.retry_batch_download(network, batch_id) + } } } } @@ -1018,10 +1001,7 @@ impl BackFillSync { /// When resuming a chain, this function searches for batches that need to be re-downloaded and /// transitions their state to redownload the batch. - fn resume_batches( - &mut self, - network: &mut SyncNetworkContext, - ) -> Result<(), BackFillError> { + fn resume_batches(&mut self, network: &mut SyncNetworkContext) -> Result<(), BackFillError> { let batch_ids_to_retry = self .batches .iter() @@ -1047,7 +1027,7 @@ impl BackFillSync { /// pool and left over batches until the batch buffer is reached or all peers are exhausted. 
fn request_batches( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, ) -> Result<(), BackFillError> { if !matches!(self.state(), BackFillState::Syncing) { return Ok(()); diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index ece923ef59..22d815121a 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -5,10 +5,10 @@ use beacon_chain::{BeaconChainTypes, BlockError}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; -use slog::{crit, debug, error, trace, warn, Logger}; +use slog::{debug, error, trace, warn, Logger}; use smallvec::SmallVec; +use std::sync::Arc; use store::{Hash256, SignedBeaconBlock}; -use tokio::sync::mpsc; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; use crate::metrics; @@ -18,6 +18,7 @@ use self::{ single_block_lookup::SingleBlockRequest, }; +use super::manager::BlockProcessResult; use super::BatchProcessResult; use super::{ manager::{BlockProcessType, Id}, @@ -34,7 +35,7 @@ const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3; pub(crate) struct BlockLookups { /// A collection of parent block lookups. - parent_queue: SmallVec<[ParentLookup; 3]>, + parent_queue: SmallVec<[ParentLookup; 3]>, /// A cache of failed chain lookups to prevent duplicate searches. failed_chains: LRUTimeCache, @@ -45,34 +46,27 @@ pub(crate) struct BlockLookups { /// The flag allows us to determine if the peer returned data or sent us nothing. single_block_lookups: FnvHashMap>, - /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender>, - /// The logger for the import manager. 
log: Logger, } impl BlockLookups { - pub fn new(beacon_processor_send: mpsc::Sender>, log: Logger) -> Self { + pub fn new(log: Logger) -> Self { Self { parent_queue: Default::default(), failed_chains: LRUTimeCache::new(Duration::from_secs( FAILED_CHAINS_CACHE_EXPIRY_SECONDS, )), single_block_lookups: Default::default(), - beacon_processor_send, log, } } /* Lookup requests */ - pub fn search_block( - &mut self, - hash: Hash256, - peer_id: PeerId, - cx: &mut SyncNetworkContext, - ) { + /// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is + /// constructed. + pub fn search_block(&mut self, hash: Hash256, peer_id: PeerId, cx: &mut SyncNetworkContext) { // Do not re-request a block that is already being requested if self .single_block_lookups @@ -103,11 +97,13 @@ impl BlockLookups { } } + /// If a block is attempted to be processed but we do not know its parent, this function is + /// called in order to find the block's parent. pub fn search_parent( &mut self, - block: Box>, + block: Arc>, peer_id: PeerId, - cx: &mut SyncNetworkContext, + cx: &mut SyncNetworkContext, ) { let block_root = block.canonical_root(); let parent_root = block.parent_root(); @@ -129,7 +125,7 @@ impl BlockLookups { return; } - let parent_lookup = ParentLookup::new(*block, peer_id); + let parent_lookup = ParentLookup::new(block, peer_id); self.request_parent(parent_lookup, cx); } @@ -139,20 +135,18 @@ impl BlockLookups { &mut self, id: Id, peer_id: PeerId, - block: Option>>, + block: Option>>, seen_timestamp: Duration, - cx: &mut SyncNetworkContext, + cx: &mut SyncNetworkContext, ) { let mut request = match self.single_block_lookups.entry(id) { Entry::Occupied(req) => req, Entry::Vacant(_) => { if block.is_some() { - crit!( + debug!( self.log, "Block returned for single block lookup not present" ); - #[cfg(debug_assertions)] - panic!("block returned for single block lookup not present"); } return; } @@ -166,6 +160,7 @@ impl BlockLookups { block, 
seen_timestamp, BlockProcessType::SingleBlock { id }, + cx, ) .is_err() { @@ -199,13 +194,14 @@ impl BlockLookups { ); } + /// Process a response received from a parent lookup request. pub fn parent_lookup_response( &mut self, id: Id, peer_id: PeerId, - block: Option>>, + block: Option>>, seen_timestamp: Duration, - cx: &mut SyncNetworkContext, + cx: &mut SyncNetworkContext, ) { let mut parent_lookup = if let Some(pos) = self .parent_queue @@ -229,6 +225,7 @@ impl BlockLookups { block, seen_timestamp, BlockProcessType::ParentLookup { chain_hash }, + cx, ) .is_ok() { @@ -246,7 +243,7 @@ impl BlockLookups { | VerifyError::ExtraBlocksReturned => { let e = e.into(); warn!(self.log, "Peer sent invalid response to parent request."; - "peer_id" => %peer_id, "reason" => e); + "peer_id" => %peer_id, "reason" => %e); // We do not tolerate these kinds of errors. We will accept a few but these are signs // of a faulty peer. @@ -256,7 +253,6 @@ impl BlockLookups { self.request_parent(parent_lookup, cx); } VerifyError::PreviousFailure { parent_root } => { - self.failed_chains.insert(parent_lookup.chain_hash()); debug!( self.log, "Parent chain ignored due to past failure"; @@ -283,7 +279,7 @@ impl BlockLookups { /* Error responses */ #[allow(clippy::needless_collect)] // false positive - pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { + pub fn peer_disconnected(&mut self, peer_id: &PeerId, cx: &mut SyncNetworkContext) { /* Check disconnection for single block lookups */ // better written after https://github.com/rust-lang/rust/issues/59618 let remove_retry_ids: Vec = self @@ -334,11 +330,12 @@ impl BlockLookups { } } + /// An RPC error has occurred during a parent lookup. This function handles this case. 
pub fn parent_lookup_failed( &mut self, id: Id, peer_id: PeerId, - cx: &mut SyncNetworkContext, + cx: &mut SyncNetworkContext, ) { if let Some(pos) = self .parent_queue @@ -358,9 +355,9 @@ impl BlockLookups { ); } - pub fn single_block_lookup_failed(&mut self, id: Id, cx: &mut SyncNetworkContext) { + pub fn single_block_lookup_failed(&mut self, id: Id, cx: &mut SyncNetworkContext) { if let Some(mut request) = self.single_block_lookups.remove(&id) { - request.register_failure(); + request.register_failure_downloading(); trace!(self.log, "Single block lookup failed"; "block" => %request.hash); if let Ok((peer_id, block_request)) = request.request_block() { if let Ok(request_id) = cx.single_block_lookup_request(peer_id, block_request) { @@ -380,16 +377,13 @@ impl BlockLookups { pub fn single_block_processed( &mut self, id: Id, - result: Result<(), BlockError>, - cx: &mut SyncNetworkContext, + result: BlockProcessResult, + cx: &mut SyncNetworkContext, ) { let mut req = match self.single_block_lookups.remove(&id) { Some(req) => req, None => { - #[cfg(debug_assertions)] - panic!("block processed for single block lookup not present"); - #[cfg(not(debug_assertions))] - return crit!( + return debug!( self.log, "Block processed for single block lookup not present" ); @@ -402,38 +396,57 @@ impl BlockLookups { Err(_) => return, }; - if let Err(e) = &result { - trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); - } else { - trace!(self.log, "Single block processing succeeded"; "block" => %root); - } - - if let Err(e) = result { - match e { - BlockError::BlockIsAlreadyKnown => { - // No error here - } - BlockError::BeaconChainError(e) => { - // Internal error - error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); - } - BlockError::ParentUnknown(block) => { - self.search_parent(block, peer_id, cx); - } - other => { - warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, 
"error" => ?other, "peer_id" => %peer_id); - cx.report_peer( - peer_id, - PeerAction::MidToleranceError, - "single_block_failure", - ); - - // Try it again if possible. - req.register_failure(); - if let Ok((peer_id, request)) = req.request_block() { - if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) { - // insert with the new id - self.single_block_lookups.insert(request_id, req); + match result { + BlockProcessResult::Ok => { + trace!(self.log, "Single block processing succeeded"; "block" => %root); + } + BlockProcessResult::Ignored => { + // Beacon processor signalled to ignore the block processing result. + // This implies that the cpu is overloaded. Drop the request. + warn!( + self.log, + "Single block processing was ignored, cpu might be overloaded"; + "action" => "dropping single block request" + ); + } + BlockProcessResult::Err(e) => { + trace!(self.log, "Single block processing failed"; "block" => %root, "error" => %e); + match e { + BlockError::BlockIsAlreadyKnown => { + // No error here + } + BlockError::BeaconChainError(e) => { + // Internal error + error!(self.log, "Beacon chain error processing single block"; "block_root" => %root, "error" => ?e); + } + BlockError::ParentUnknown(block) => { + self.search_parent(block, peer_id, cx); + } + ref e @ BlockError::ExecutionPayloadError(ref epe) if !epe.penalize_peer() => { + // These errors indicate that the execution layer is offline + // and failed to validate the execution payload. Do not downscore peer. + debug!( + self.log, + "Single block lookup failed. Execution layer is offline / unsynced / misconfigured"; + "root" => %root, + "error" => ?e + ); + } + other => { + warn!(self.log, "Peer sent invalid block in single block lookup"; "root" => %root, "error" => ?other, "peer_id" => %peer_id); + cx.report_peer( + peer_id, + PeerAction::MidToleranceError, + "single_block_failure", + ); + // Try it again if possible. 
+ req.register_failure_processing(); + if let Ok((peer_id, request)) = req.request_block() { + if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) + { + // insert with the new id + self.single_block_lookups.insert(request_id, req); + } } } } @@ -449,8 +462,8 @@ impl BlockLookups { pub fn parent_block_processed( &mut self, chain_hash: Hash256, - result: Result<(), BlockError>, - cx: &mut SyncNetworkContext, + result: BlockProcessResult, + cx: &mut SyncNetworkContext, ) { let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self .parent_queue @@ -463,37 +476,51 @@ impl BlockLookups { }) { (self.parent_queue.remove(pos), peer) } else { - #[cfg(debug_assertions)] - panic!( - "Process response for a parent lookup request that was not found. Chain_hash: {}", - chain_hash - ); - #[cfg(not(debug_assertions))] - return crit!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); + return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; - if let Err(e) = &result { - trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e); - } else { - trace!(self.log, "Parent block processing succeeded"; &parent_lookup); + match &result { + BlockProcessResult::Ok => { + trace!(self.log, "Parent block processing succeeded"; &parent_lookup) + } + BlockProcessResult::Err(e) => { + trace!(self.log, "Parent block processing failed"; &parent_lookup, "error" => %e) + } + BlockProcessResult::Ignored => { + trace!( + self.log, + "Parent block processing job was ignored"; + "action" => "re-requesting block", + &parent_lookup + ); + } } match result { - Err(BlockError::ParentUnknown(block)) => { + BlockProcessResult::Err(BlockError::ParentUnknown(block)) => { // need to keep looking for parents // add the block back to the queue and continue the search - parent_lookup.add_block(*block); + parent_lookup.add_block(block); 
self.request_parent(parent_lookup, cx); } - Ok(_) | Err(BlockError::BlockIsAlreadyKnown { .. }) => { + BlockProcessResult::Ok + | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => { + // Check if the beacon processor is available + let beacon_processor_send = match cx.processor_channel_if_enabled() { + Some(channel) => channel, + None => { + return trace!( + self.log, + "Dropping parent chain segment that was ready for processing."; + parent_lookup + ); + } + }; let chain_hash = parent_lookup.chain_hash(); let blocks = parent_lookup.chain_blocks(); let process_id = ChainSegmentProcessId::ParentLookup(chain_hash); - match self - .beacon_processor_send - .try_send(WorkEvent::chain_segment(process_id, blocks)) - { + match beacon_processor_send.try_send(WorkEvent::chain_segment(process_id, blocks)) { Ok(_) => { self.parent_queue.push(parent_lookup); } @@ -506,7 +533,19 @@ impl BlockLookups { } } } - Err(outcome) => { + ref e @ BlockProcessResult::Err(BlockError::ExecutionPayloadError(ref epe)) + if !epe.penalize_peer() => + { + // These errors indicate that the execution layer is offline + // and failed to validate the execution payload. Do not downscore peer. + debug!( + self.log, + "Parent lookup failed. Execution layer is offline"; + "chain_hash" => %chain_hash, + "error" => ?e + ); + } + BlockProcessResult::Err(outcome) => { // all else we consider the chain a failure and downvote the peer that sent // us the last block warn!( @@ -516,12 +555,22 @@ impl BlockLookups { "last_peer" => %peer_id, ); - // Add this chain to cache of failed chains - self.failed_chains.insert(chain_hash); - // This currently can be a host of errors. We permit this due to the partial // ambiguity. 
cx.report_peer(peer_id, PeerAction::MidToleranceError, "parent_request_err"); + + // Try again if possible + parent_lookup.processing_failed(); + self.request_parent(parent_lookup, cx); + } + BlockProcessResult::Ignored => { + // Beacon processor signalled to ignore the block processing result. + // This implies that the cpu is overloaded. Drop the request. + warn!( + self.log, + "Parent block processing was ignored, cpu might be overloaded"; + "action" => "dropping parent request" + ); } } @@ -535,7 +584,7 @@ impl BlockLookups { &mut self, chain_hash: Hash256, result: BatchProcessResult, - cx: &mut SyncNetworkContext, + cx: &mut SyncNetworkContext, ) { let parent_lookup = if let Some(pos) = self .parent_queue @@ -544,31 +593,26 @@ impl BlockLookups { { self.parent_queue.remove(pos) } else { - #[cfg(debug_assertions)] - panic!( - "Chain process response for a parent lookup request that was not found. Chain_hash: {}", - chain_hash - ); - #[cfg(not(debug_assertions))] - return crit!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); + return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash); }; debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result); match result { - BatchProcessResult::Success(_) => { + BatchProcessResult::Success { .. } => { // nothing to do. 
} - BatchProcessResult::Failed { + BatchProcessResult::FaultyFailure { imported_blocks: _, - peer_action, + penalty, } => { self.failed_chains.insert(parent_lookup.chain_hash()); - if let Some(peer_action) = peer_action { - for &peer_id in parent_lookup.used_peers() { - cx.report_peer(peer_id, peer_action, "parent_chain_failure") - } + for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, penalty, "parent_chain_failure") } } + BatchProcessResult::NonFaultyFailure => { + // We might request this chain again if there is need but otherwise, don't try again + } } metrics::set_gauge( @@ -581,28 +625,37 @@ impl BlockLookups { fn send_block_for_processing( &mut self, - block: Box>, + block: Arc>, duration: Duration, process_type: BlockProcessType, + cx: &mut SyncNetworkContext, ) -> Result<(), ()> { - trace!(self.log, "Sending block for processing"; "block" => %block.canonical_root(), "process" => ?process_type); - let event = WorkEvent::rpc_beacon_block(block, duration, process_type); - if let Err(e) = self.beacon_processor_send.try_send(event) { - error!( - self.log, - "Failed to send sync block to processor"; - "error" => ?e - ); - return Err(()); + match cx.processor_channel_if_enabled() { + Some(beacon_processor_send) => { + trace!(self.log, "Sending block for processing"; "block" => %block.canonical_root(), "process" => ?process_type); + let event = WorkEvent::rpc_beacon_block(block, duration, process_type); + if let Err(e) = beacon_processor_send.try_send(event) { + error!( + self.log, + "Failed to send sync block to processor"; + "error" => ?e + ); + Err(()) + } else { + Ok(()) + } + } + None => { + trace!(self.log, "Dropping block ready for processing. 
Beacon processor not available"; "block" => %block.canonical_root()); + Err(()) + } } - - Ok(()) } fn request_parent( &mut self, - mut parent_lookup: ParentLookup, - cx: &mut SyncNetworkContext, + mut parent_lookup: ParentLookup, + cx: &mut SyncNetworkContext, ) { match parent_lookup.request_parent(cx) { Err(e) => { @@ -611,14 +664,26 @@ impl BlockLookups { parent_lookup::RequestError::SendFailed(_) => { // Probably shutting down, nothing to do here. Drop the request } - parent_lookup::RequestError::ChainTooLong - | parent_lookup::RequestError::TooManyAttempts => { + parent_lookup::RequestError::ChainTooLong => { self.failed_chains.insert(parent_lookup.chain_hash()); // This indicates faulty peers. for &peer_id in parent_lookup.used_peers() { cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) } } + parent_lookup::RequestError::TooManyAttempts { cannot_process } => { + // We only consider the chain failed if we were unable to process it. + // We could have failed because one peer continually failed to send us + // bad blocks. We still allow other peers to send us this chain. Note + // that peers that do this, still get penalised. + if cannot_process { + self.failed_chains.insert(parent_lookup.chain_hash()); + } + // This indicates faulty peers. + for &peer_id in parent_lookup.used_peers() { + cx.report_peer(peer_id, PeerAction::LowToleranceError, e.as_static()) + } + } parent_lookup::RequestError::NoPeers => { // This happens if the peer disconnects while the block is being // processed. Drop the request without extra penalty @@ -637,4 +702,14 @@ impl BlockLookups { self.parent_queue.len() as i64, ); } + + /// Drops all the single block requests and returns how many requests were dropped. + pub fn drop_single_block_requests(&mut self) -> usize { + self.single_block_lookups.drain().len() + } + + /// Drops all the parent chain requests and returns how many requests were dropped. 
+ pub fn drop_parent_chain_requests(&mut self) -> usize { + self.parent_queue.drain(..).len() + } } diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index a9a3c34bc0..295d9cc94b 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,5 +1,7 @@ +use beacon_chain::BeaconChainTypes; use lighthouse_network::PeerId; -use store::{EthSpec, Hash256, SignedBeaconBlock}; +use std::sync::Arc; +use store::{Hash256, SignedBeaconBlock}; use strum::IntoStaticStr; use crate::sync::{ @@ -9,7 +11,7 @@ use crate::sync::{ use super::single_block_lookup::{self, SingleBlockRequest}; -/// How many attempts we try to find a parent of a block before we give up trying . +/// How many attempts we try to find a parent of a block before we give up trying. pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth @@ -17,11 +19,11 @@ pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; /// Maintains a sequential list of parents to lookup and the lookup's current state. -pub(crate) struct ParentLookup { +pub(crate) struct ParentLookup { /// The root of the block triggering this parent request. chain_hash: Hash256, /// The blocks that have currently been downloaded. - downloaded_blocks: Vec>, + downloaded_blocks: Vec>>, /// Request of the last parent. current_parent_request: SingleBlockRequest, /// Id of the last parent request. @@ -40,18 +42,23 @@ pub enum VerifyError { pub enum RequestError { SendFailed(&'static str), ChainTooLong, - TooManyAttempts, + /// We witnessed too many failures trying to complete this parent lookup. 
+ TooManyAttempts { + /// We received more failures trying to process the blocks than downloading them + /// from peers. + cannot_process: bool, + }, NoPeers, } -impl ParentLookup { - pub fn contains_block(&self, block: &SignedBeaconBlock) -> bool { +impl ParentLookup { + pub fn contains_block(&self, block: &SignedBeaconBlock) -> bool { self.downloaded_blocks .iter() - .any(|d_block| d_block == block) + .any(|d_block| d_block.as_ref() == block) } - pub fn new(block: SignedBeaconBlock, peer_id: PeerId) -> Self { + pub fn new(block: Arc>, peer_id: PeerId) -> Self { let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); Self { @@ -86,7 +93,7 @@ impl ParentLookup { self.current_parent_request.check_peer_disconnected(peer_id) } - pub fn add_block(&mut self, block: SignedBeaconBlock) { + pub fn add_block(&mut self, block: Arc>) { let next_parent = block.parent_root(); self.downloaded_blocks.push(block); self.current_parent_request.hash = next_parent; @@ -104,11 +111,16 @@ impl ParentLookup { } pub fn download_failed(&mut self) { - self.current_parent_request.register_failure(); + self.current_parent_request.register_failure_downloading(); self.current_parent_request_id = None; } - pub fn chain_blocks(&mut self) -> Vec> { + pub fn processing_failed(&mut self) { + self.current_parent_request.register_failure_processing(); + self.current_parent_request_id = None; + } + + pub fn chain_blocks(&mut self) -> Vec>> { std::mem::take(&mut self.downloaded_blocks) } @@ -116,16 +128,16 @@ impl ParentLookup { /// the processing result of the block. pub fn verify_block( &mut self, - block: Option>>, + block: Option>>, failed_chains: &mut lru_cache::LRUTimeCache, - ) -> Result>>, VerifyError> { + ) -> Result>>, VerifyError> { let block = self.current_parent_request.verify_block(block)?; // check if the parent of this block isn't in the failed cache. If it is, this chain should // be dropped and the peer downscored. 
if let Some(parent_root) = block.as_ref().map(|block| block.parent_root()) { if failed_chains.contains(&parent_root) { - self.current_parent_request.register_failure(); + self.current_parent_request.register_failure_downloading(); self.current_parent_request_id = None; return Err(VerifyError::PreviousFailure { parent_root }); } @@ -143,7 +155,7 @@ impl ParentLookup { #[cfg(test)] pub fn failed_attempts(&self) -> u8 { - self.current_parent_request.failed_attempts + self.current_parent_request.failed_attempts() } pub fn add_peer(&mut self, block_root: &Hash256, peer_id: &PeerId) -> bool { @@ -170,13 +182,15 @@ impl From for RequestError { fn from(e: super::single_block_lookup::LookupRequestError) -> Self { use super::single_block_lookup::LookupRequestError as E; match e { - E::TooManyAttempts => RequestError::TooManyAttempts, + E::TooManyAttempts { cannot_process } => { + RequestError::TooManyAttempts { cannot_process } + } E::NoPeers => RequestError::NoPeers, } } } -impl slog::KV for ParentLookup { +impl slog::KV for ParentLookup { fn serialize( &self, record: &slog::Record, @@ -194,7 +208,10 @@ impl RequestError { match self { RequestError::SendFailed(e) => e, RequestError::ChainTooLong => "chain_too_long", - RequestError::TooManyAttempts => "too_many_attempts", + RequestError::TooManyAttempts { cannot_process } if *cannot_process => { + "too_many_processing_attempts" + } + RequestError::TooManyAttempts { cannot_process: _ } => "too_many_downloading_attempts", RequestError::NoPeers => "no_peers", } } diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 347a4ae437..8ba5b17bfa 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,4 +1,5 @@ use std::collections::HashSet; +use std::sync::Arc; use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use 
rand::seq::IteratorRandom; @@ -17,8 +18,10 @@ pub struct SingleBlockRequest { pub available_peers: HashSet, /// Peers from which we have requested this block. pub used_peers: HashSet, - /// How many times have we attempted this block. - pub failed_attempts: u8, + /// How many times have we attempted to process this block. + failed_processing: u8, + /// How many times have we attempted to download this block. + failed_downloading: u8, } #[derive(Debug, PartialEq, Eq)] @@ -37,7 +40,11 @@ pub enum VerifyError { #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupRequestError { - TooManyAttempts, + /// Too many failed attempts + TooManyAttempts { + /// The failed attempts were primarily due to processing failures. + cannot_process: bool, + }, NoPeers, } @@ -48,15 +55,29 @@ impl SingleBlockRequest { state: State::AwaitingDownload, available_peers: HashSet::from([peer_id]), used_peers: HashSet::default(), - failed_attempts: 0, + failed_processing: 0, + failed_downloading: 0, } } - pub fn register_failure(&mut self) { - self.failed_attempts += 1; + /// Registers a failure in processing a block. + pub fn register_failure_processing(&mut self) { + self.failed_processing = self.failed_processing.saturating_add(1); self.state = State::AwaitingDownload; } + /// Registers a failure in downloading a block. This might be a peer disconnection or a wrong + /// block. + pub fn register_failure_downloading(&mut self) { + self.failed_downloading = self.failed_downloading.saturating_add(1); + self.state = State::AwaitingDownload; + } + + /// The total number of failures, whether it be processing or downloading. 
+ pub fn failed_attempts(&self) -> u8 { + self.failed_processing + self.failed_downloading + } + pub fn add_peer(&mut self, hash: &Hash256, peer_id: &PeerId) -> bool { let is_useful = &self.hash == hash; if is_useful { @@ -71,7 +92,7 @@ impl SingleBlockRequest { if let State::Downloading { peer_id } = &self.state { if peer_id == dc_peer_id { // Peer disconnected before providing a block - self.register_failure(); + self.register_failure_downloading(); return Err(()); } } @@ -82,18 +103,20 @@ impl SingleBlockRequest { /// Returns the block for processing if the response is what we expected. pub fn verify_block( &mut self, - block: Option>>, - ) -> Result>>, VerifyError> { + block: Option>>, + ) -> Result>>, VerifyError> { match self.state { State::AwaitingDownload => { - self.register_failure(); + self.register_failure_downloading(); Err(VerifyError::ExtraBlocksReturned) } State::Downloading { peer_id } => match block { Some(block) => { if block.canonical_root() != self.hash { // return an error and drop the block - self.register_failure(); + // NOTE: we take this is as a download failure to prevent counting the + // attempt as a chain failure, but simply a peer failure. + self.register_failure_downloading(); Err(VerifyError::RootMismatch) } else { // Return the block for processing. @@ -102,14 +125,14 @@ impl SingleBlockRequest { } } None => { - self.register_failure(); + self.register_failure_downloading(); Err(VerifyError::NoBlockReturned) } }, State::Processing { peer_id: _ } => match block { Some(_) => { // We sent the block for processing and received an extra block. 
- self.register_failure(); + self.register_failure_downloading(); Err(VerifyError::ExtraBlocksReturned) } None => { @@ -123,19 +146,19 @@ impl SingleBlockRequest { pub fn request_block(&mut self) -> Result<(PeerId, BlocksByRootRequest), LookupRequestError> { debug_assert!(matches!(self.state, State::AwaitingDownload)); - if self.failed_attempts <= MAX_ATTEMPTS { - if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { - let request = BlocksByRootRequest { - block_roots: VariableList::from(vec![self.hash]), - }; - self.state = State::Downloading { peer_id }; - self.used_peers.insert(peer_id); - Ok((peer_id, request)) - } else { - Err(LookupRequestError::NoPeers) - } + if self.failed_attempts() >= MAX_ATTEMPTS { + Err(LookupRequestError::TooManyAttempts { + cannot_process: self.failed_processing >= self.failed_downloading, + }) + } else if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) { + let request = BlocksByRootRequest { + block_roots: VariableList::from(vec![self.hash]), + }; + self.state = State::Downloading { peer_id }; + self.used_peers.insert(peer_id); + Ok((peer_id, request)) } else { - Err(LookupRequestError::TooManyAttempts) + Err(LookupRequestError::NoPeers) } } @@ -168,6 +191,8 @@ impl slog::Value for SingleBlockRequest { serializer.emit_arguments("processing_peer", &format_args!("{}", peer_id))? 
} } + serializer.emit_u8("failed_downloads", self.failed_downloading)?; + serializer.emit_u8("failed_processing", self.failed_processing)?; slog::Result::Ok(()) } } @@ -195,15 +220,32 @@ mod tests { let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); sl.request_block().unwrap(); - sl.verify_block(Some(Box::new(block))).unwrap().unwrap(); + sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); } #[test] - fn test_max_attempts() { + fn test_block_lookup_failures() { + const FAILURES: u8 = 3; let peer_id = PeerId::random(); let block = rand_block(); - let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id); - sl.register_failure(); + let mut sl = SingleBlockRequest::::new(block.canonical_root(), peer_id); + for _ in 1..FAILURES { + sl.request_block().unwrap(); + sl.register_failure_downloading(); + } + + // Now we receive the block and send it for processing + sl.request_block().unwrap(); + sl.verify_block(Some(Arc::new(block))).unwrap().unwrap(); + + // One processing failure maxes the available attempts + sl.register_failure_processing(); + assert_eq!( + sl.request_block(), + Err(LookupRequestError::TooManyAttempts { + cannot_process: false + }) + ) } } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index dde7d49953..ead15e23a5 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -12,6 +12,7 @@ use lighthouse_network::{NetworkGlobals, Request}; use slog::{Drain, Level}; use slot_clock::SystemTimeSlotClock; use store::MemoryStore; +use tokio::sync::mpsc; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use types::MinimalEthSpec as E; @@ -26,7 +27,7 @@ struct TestRig { const D: Duration = Duration::new(0, 0); impl TestRig { - fn test_setup(log_level: Option) -> (BlockLookups, SyncNetworkContext, Self) { + fn test_setup(log_level: Option) -> (BlockLookups, 
SyncNetworkContext, Self) { let log = { let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::FullFormat::new(decorator).build().fuse(); @@ -47,15 +48,13 @@ impl TestRig { network_rx, rng, }; - let bl = BlockLookups::new( - beacon_processor_tx, - log.new(slog::o!("component" => "block_lookups")), - ); + let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups"))); let cx = { let globals = Arc::new(NetworkGlobals::new_test_globals(&log)); SyncNetworkContext::new( network_tx, globals, + beacon_processor_tx, log.new(slog::o!("component" => "network_context")), ) }; @@ -158,7 +157,7 @@ fn test_single_block_lookup_happy_path() { // The peer provides the correct block, should not be penalized. Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Box::new(block)), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); rig.expect_empty_network(); rig.expect_block_process(); @@ -168,7 +167,7 @@ fn test_single_block_lookup_happy_path() { // Send the stream termination. Peer should have not been penalized, and the request removed // after processing. bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); - bl.single_block_processed(id, Ok(()), &mut cx); + bl.single_block_processed(id, Ok(()).into(), &mut cx); rig.expect_empty_network(); assert_eq!(bl.single_block_lookups.len(), 0); } @@ -204,7 +203,7 @@ fn test_single_block_lookup_wrong_response() { // Peer sends something else. It should be penalized. let bad_block = rig.rand_block(); - bl.single_block_lookup_response(id, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); rig.expect_block_request(); // should be retried @@ -243,7 +242,7 @@ fn test_single_block_lookup_becomes_parent_request() { // The peer provides the correct block, should not be penalized. 
Now the block should be sent // for processing. - bl.single_block_lookup_response(id, peer_id, Some(Box::new(block.clone())), D, &mut cx); + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); rig.expect_empty_network(); rig.expect_block_process(); @@ -252,7 +251,11 @@ fn test_single_block_lookup_becomes_parent_request() { // Send the stream termination. Peer should have not been penalized, and the request moved to a // parent request after processing. - bl.single_block_processed(id, Err(BlockError::ParentUnknown(Box::new(block))), &mut cx); + bl.single_block_processed( + id, + BlockError::ParentUnknown(Arc::new(block)).into(), + &mut cx, + ); assert_eq!(bl.single_block_lookups.len(), 0); rig.expect_parent_request(); rig.expect_empty_network(); @@ -269,18 +272,21 @@ fn test_parent_lookup_happy_path() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id = rig.expect_parent_request(); // Peer sends the right block, it should be sent for processing. Peer should not be penalized. - bl.parent_lookup_response(id, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); rig.expect_empty_network(); // Processing succeeds, now the rest of the chain should be sent for processing. 
- bl.parent_block_processed(chain_hash, Err(BlockError::BlockIsAlreadyKnown), &mut cx); + bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx); rig.expect_parent_chain_process(); - bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); assert_eq!(bl.parent_queue.len(), 0); } @@ -294,12 +300,12 @@ fn test_parent_lookup_wrong_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends the wrong block, peer should be penalized and the block re-requested. let bad_block = rig.rand_block(); - bl.parent_lookup_response(id1, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.parent_lookup_response(id1, peer_id, Some(Arc::new(bad_block)), D, &mut cx); rig.expect_penalty(); let id2 = rig.expect_parent_request(); @@ -308,13 +314,16 @@ fn test_parent_lookup_wrong_response() { rig.expect_empty_network(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. 
- bl.parent_block_processed(chain_hash, Ok(()), &mut cx); + bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); - bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); assert_eq!(bl.parent_queue.len(), 0); } @@ -328,7 +337,7 @@ fn test_parent_lookup_empty_response() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // Peer sends an empty response, peer should be penalized and the block re-requested. @@ -337,13 +346,16 @@ fn test_parent_lookup_empty_response() { let id2 = rig.expect_parent_request(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Ok(()), &mut cx); + bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); - bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); assert_eq!(bl.parent_queue.len(), 0); } @@ -357,7 +369,7 @@ fn test_parent_lookup_rpc_failure() { let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); + bl.search_parent(Arc::new(block), peer_id, &mut cx); let id1 = rig.expect_parent_request(); // The request fails. It should be tried again. 
@@ -365,13 +377,16 @@ fn test_parent_lookup_rpc_failure() { let id2 = rig.expect_parent_request(); // Send the right block this time. - bl.parent_lookup_response(id2, peer_id, Some(Box::new(parent)), D, &mut cx); + bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx); rig.expect_block_process(); // Processing succeeds, now the rest of the chain should be sent for processing. - bl.parent_block_processed(chain_hash, Ok(()), &mut cx); + bl.parent_block_processed(chain_hash, Ok(()).into(), &mut cx); rig.expect_parent_chain_process(); - bl.parent_chain_processed(chain_hash, BatchProcessResult::Success(true), &mut cx); + let process_result = BatchProcessResult::Success { + was_non_empty: true, + }; + bl.parent_chain_processed(chain_hash, process_result, &mut cx); assert_eq!(bl.parent_queue.len(), 0); } @@ -381,12 +396,11 @@ fn test_parent_lookup_too_many_attempts() { let parent = rig.rand_block(); let block = rig.block_with_parent(parent.canonical_root()); - let chain_hash = block.canonical_root(); let peer_id = PeerId::random(); // Trigger the request - bl.search_parent(Box::new(block), peer_id, &mut cx); - for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE + 1 { + bl.search_parent(Arc::new(block), peer_id, &mut cx); + for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { let id = rig.expect_parent_request(); match i % 2 { // make sure every error is accounted for @@ -397,7 +411,9 @@ fn test_parent_lookup_too_many_attempts() { _ => { // Send a bad block this time. It should be tried again. 
let bad_block = rig.rand_block(); - bl.parent_lookup_response(id, peer_id, Some(Box::new(bad_block)), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + // Send the stream termination + bl.parent_lookup_response(id, peer_id, None, D, &mut cx); rig.expect_penalty(); } } @@ -407,7 +423,74 @@ fn test_parent_lookup_too_many_attempts() { } assert_eq!(bl.parent_queue.len(), 0); - assert!(bl.failed_chains.contains(&chain_hash)); +} + +#[test] +fn test_parent_lookup_too_many_download_attempts_no_blacklist() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let parent = rig.rand_block(); + let block = rig.block_with_parent(parent.canonical_root()); + let block_hash = block.canonical_root(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_parent(Arc::new(block), peer_id, &mut cx); + for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE { + assert!(!bl.failed_chains.contains(&block_hash)); + let id = rig.expect_parent_request(); + if i % 2 != 0 { + // The request fails. It should be tried again. + bl.parent_lookup_failed(id, peer_id, &mut cx); + } else { + // Send a bad block this time. It should be tried again. 
+ let bad_block = rig.rand_block(); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx); + rig.expect_penalty(); + } + if i < parent_lookup::PARENT_FAIL_TOLERANCE { + assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i)); + } + } + + assert_eq!(bl.parent_queue.len(), 0); + assert!(!bl.failed_chains.contains(&block_hash)); + assert!(!bl.failed_chains.contains(&parent.canonical_root())); +} + +#[test] +fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { + const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1; + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let parent = Arc::new(rig.rand_block()); + let block = rig.block_with_parent(parent.canonical_root()); + let block_hash = block.canonical_root(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_parent(Arc::new(block), peer_id, &mut cx); + + // Fail downloading the block + for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { + let id = rig.expect_parent_request(); + // The request fails. It should be tried again. 
+ bl.parent_lookup_failed(id, peer_id, &mut cx); + } + + // Now fail processing a block in the parent request + for _ in 0..PROCESSING_FAILURES { + let id = dbg!(rig.expect_parent_request()); + assert!(!bl.failed_chains.contains(&block_hash)); + // send the right parent but fail processing + bl.parent_lookup_response(id, peer_id, Some(parent.clone()), D, &mut cx); + bl.parent_block_processed(block_hash, BlockError::InvalidSignature.into(), &mut cx); + bl.parent_lookup_response(id, peer_id, None, D, &mut cx); + rig.expect_penalty(); + } + + assert!(bl.failed_chains.contains(&block_hash)); + assert_eq!(bl.parent_queue.len(), 0); } #[test] @@ -427,12 +510,12 @@ fn test_parent_lookup_too_deep() { let peer_id = PeerId::random(); let trigger_block = blocks.pop().unwrap(); let chain_hash = trigger_block.canonical_root(); - bl.search_parent(Box::new(trigger_block), peer_id, &mut cx); + bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); for block in blocks.into_iter().rev() { let id = rig.expect_parent_request(); // the block - bl.parent_lookup_response(id, peer_id, Some(Box::new(block.clone())), D, &mut cx); + bl.parent_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx); // the stream termination bl.parent_lookup_response(id, peer_id, None, D, &mut cx); // the processing request @@ -440,7 +523,7 @@ fn test_parent_lookup_too_deep() { // the processing result bl.parent_block_processed( chain_hash, - Err(BlockError::ParentUnknown(Box::new(block))), + BlockError::ParentUnknown(Arc::new(block)).into(), &mut cx, ) } @@ -454,7 +537,60 @@ fn test_parent_lookup_disconnection() { let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); let peer_id = PeerId::random(); let trigger_block = rig.rand_block(); - bl.search_parent(Box::new(trigger_block), peer_id, &mut cx); + bl.search_parent(Arc::new(trigger_block), peer_id, &mut cx); bl.peer_disconnected(&peer_id, &mut cx); assert!(bl.parent_queue.is_empty()); } + +#[test] +fn 
test_single_block_lookup_ignored_response() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let block = rig.rand_block(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_block(block.canonical_root(), peer_id, &mut cx); + let id = rig.expect_block_request(); + + // The peer provides the correct block, should not be penalized. Now the block should be sent + // for processing. + bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx); + rig.expect_empty_network(); + rig.expect_block_process(); + + // The request should still be active. + assert_eq!(bl.single_block_lookups.len(), 1); + + // Send the stream termination. Peer should have not been penalized, and the request removed + // after processing. + bl.single_block_lookup_response(id, peer_id, None, D, &mut cx); + // Send an Ignored response, the request should be dropped + bl.single_block_processed(id, BlockProcessResult::Ignored, &mut cx); + rig.expect_empty_network(); + assert_eq!(bl.single_block_lookups.len(), 0); +} + +#[test] +fn test_parent_lookup_ignored_response() { + let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); + + let parent = rig.rand_block(); + let block = rig.block_with_parent(parent.canonical_root()); + let chain_hash = block.canonical_root(); + let peer_id = PeerId::random(); + + // Trigger the request + bl.search_parent(Arc::new(block), peer_id, &mut cx); + let id = rig.expect_parent_request(); + + // Peer sends the right block, it should be sent for processing. Peer should not be penalized. + bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx); + rig.expect_block_process(); + rig.expect_empty_network(); + + // Return an Ignored result. 
The request should be dropped + bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx); + rig.expect_empty_network(); + assert_eq!(bl.parent_queue.len(), 0); +} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 53480db88e..6230347977 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -41,7 +41,8 @@ use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; +use futures::StreamExt; use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; @@ -88,12 +89,12 @@ pub enum SyncMessage { RpcBlock { request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + beacon_block: Option>>, seen_timestamp: Duration, }, /// A block with an unknown parent has been received. - UnknownBlock(PeerId, Box>), + UnknownBlock(PeerId, Arc>), /// A peer has sent an object that references a block that is unknown. This triggers the /// manager to attempt to find the block matching the unknown hash. @@ -117,7 +118,7 @@ pub enum SyncMessage { /// Block processed BlockProcessed { process_type: BlockProcessType, - result: Result<(), BlockError>, + result: BlockProcessResult, }, } @@ -128,16 +129,26 @@ pub enum BlockProcessType { ParentLookup { chain_hash: Hash256 }, } +#[derive(Debug)] +pub enum BlockProcessResult { + Ok, + Err(BlockError), + Ignored, +} + /// The result of processing multiple blocks (a chain segment). #[derive(Debug)] pub enum BatchProcessResult { /// The batch was completed successfully. It carries whether the sent batch contained blocks. 
- Success(bool), - /// The batch processing failed. It carries whether the processing imported any block. - Failed { - imported_blocks: bool, - peer_action: Option, + Success { + was_non_empty: bool, }, + /// The batch processing failed. It carries whether the processing imported any block. + FaultyFailure { + imported_blocks: bool, + penalty: PeerAction, + }, + NonFaultyFailure, } /// The primary object for handling and driving all the current syncing logic. It maintains the @@ -155,7 +166,7 @@ pub struct SyncManager { input_channel: mpsc::UnboundedReceiver>, /// A network context to contact the network service. - network: SyncNetworkContext, + network: SyncNetworkContext, /// The object handling long-range batch load-balanced syncing. range_sync: RangeSync, @@ -192,19 +203,15 @@ pub fn spawn( chain: beacon_chain.clone(), network_globals: network_globals.clone(), input_channel: sync_recv, - network: SyncNetworkContext::new(network_send, network_globals.clone(), log.clone()), - range_sync: RangeSync::new( - beacon_chain.clone(), - beacon_processor_send.clone(), + network: SyncNetworkContext::new( + network_send, + network_globals.clone(), + beacon_processor_send, log.clone(), ), - backfill_sync: BackFillSync::new( - beacon_chain, - network_globals, - beacon_processor_send.clone(), - log.clone(), - ), - block_lookups: BlockLookups::new(beacon_processor_send, log.clone()), + range_sync: RangeSync::new(beacon_chain.clone(), log.clone()), + backfill_sync: BackFillSync::new(beacon_chain, network_globals, log.clone()), + block_lookups: BlockLookups::new(log.clone()), log: log.clone(), }; @@ -228,17 +235,12 @@ impl SyncManager { /// ours that we consider it fully sync'd with respect to our current chain. 
fn add_peer(&mut self, peer_id: PeerId, remote: SyncInfo) { // ensure the beacon chain still exists - let local = match self.chain.status_message() { - Ok(status) => SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, - }, - Err(e) => { - return error!(self.log, "Failed to get peer sync info"; - "msg" => "likely due to head lock contention", "err" => ?e) - } + let status = self.chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, }; let sync_type = remote_sync_type(&local, &remote, &self.chain); @@ -327,10 +329,17 @@ impl SyncManager { if let Some(was_updated) = update_sync_status { let is_connected = self.network_globals.peers.read().is_connected(peer_id); if was_updated { - debug!(self.log, "Peer transitioned sync state"; "peer_id" => %peer_id, "new_state" => rpr, - "our_head_slot" => local_sync_info.head_slot, "out_finalized_epoch" => local_sync_info.finalized_epoch, - "their_head_slot" => remote_sync_info.head_slot, "their_finalized_epoch" => remote_sync_info.finalized_epoch, - "is_connected" => is_connected); + debug!( + self.log, + "Peer transitioned sync state"; + "peer_id" => %peer_id, + "new_state" => rpr, + "our_head_slot" => local_sync_info.head_slot, + "our_finalized_epoch" => local_sync_info.finalized_epoch, + "their_head_slot" => remote_sync_info.head_slot, + "their_finalized_epoch" => remote_sync_info.finalized_epoch, + "is_connected" => is_connected + ); // A peer has transitioned its sync state. If the new state is "synced" we // inform the backfill sync that a new synced peer has joined us. @@ -371,7 +380,7 @@ impl SyncManager { // advanced and will produce a head chain on re-status. 
Otherwise it will shift // to being synced let mut sync_state = { - let head = self.chain.best_slot().unwrap_or_else(|_| Slot::new(0)); + let head = self.chain.best_slot(); let current_slot = self.chain.slot().unwrap_or_else(|_| Slot::new(0)); let peers = self.network_globals.peers.read(); @@ -456,104 +465,178 @@ impl SyncManager { /// The main driving future for the sync manager. async fn main(&mut self) { + let check_ee = self.chain.execution_layer.is_some(); + let mut check_ee_stream = { + // some magic to have an instance implementing stream even if there is no execution layer + let ee_responsiveness_watch: futures::future::OptionFuture<_> = self + .chain + .execution_layer + .as_ref() + .map(|el| el.get_responsiveness_watch()) + .into(); + futures::stream::iter(ee_responsiveness_watch.await).flatten() + }; + // process any inbound messages loop { - if let Some(sync_message) = self.input_channel.recv().await { - match sync_message { - SyncMessage::AddPeer(peer_id, info) => { - self.add_peer(peer_id, info); - } - SyncMessage::RpcBlock { - request_id, - peer_id, - beacon_block, - seen_timestamp, - } => { - self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); - } - SyncMessage::UnknownBlock(peer_id, block) => { - // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore - if !self.network_globals.sync_state.read().is_synced() { - let head_slot = self - .chain - .head_info() - .map(|info| info.slot) - .unwrap_or_else(|_| Slot::from(0u64)); - let unknown_block_slot = block.slot(); + tokio::select! { + Some(sync_message) = self.input_channel.recv() => { + self.handle_message(sync_message); + }, + Some(engine_state) = check_ee_stream.next(), if check_ee => { + self.handle_new_execution_engine_state(engine_state); + } + } + } + } - // if the block is far in the future, ignore it. If its within the slot tolerance of - // our current head, regardless of the syncing state, fetch it. 
- if (head_slot >= unknown_block_slot - && head_slot.sub(unknown_block_slot).as_usize() - > SLOT_IMPORT_TOLERANCE) - || (head_slot < unknown_block_slot - && unknown_block_slot.sub(head_slot).as_usize() - > SLOT_IMPORT_TOLERANCE) - { - continue; - } - } - if self.network_globals.peers.read().is_connected(&peer_id) { - self.block_lookups - .search_parent(block, peer_id, &mut self.network); - } + fn handle_message(&mut self, sync_message: SyncMessage) { + match sync_message { + SyncMessage::AddPeer(peer_id, info) => { + self.add_peer(peer_id, info); + } + SyncMessage::RpcBlock { + request_id, + peer_id, + beacon_block, + seen_timestamp, + } => { + self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); + } + SyncMessage::UnknownBlock(peer_id, block) => { + // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore + if !self.network_globals.sync_state.read().is_synced() { + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + let unknown_block_slot = block.slot(); + + // if the block is far in the future, ignore it. If its within the slot tolerance of + // our current head, regardless of the syncing state, fetch it. + if (head_slot >= unknown_block_slot + && head_slot.sub(unknown_block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + || (head_slot < unknown_block_slot + && unknown_block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + { + return; } - SyncMessage::UnknownBlockHash(peer_id, block_hash) => { - // If we are not synced, ignore this block. 
- if self.network_globals.sync_state.read().is_synced() - && self.network_globals.peers.read().is_connected(&peer_id) - { - self.block_lookups - .search_block(block_hash, peer_id, &mut self.network); - } - } - SyncMessage::Disconnect(peer_id) => { - self.peer_disconnect(&peer_id); - } - SyncMessage::RpcError { - peer_id, - request_id, - } => self.inject_error(peer_id, request_id), - SyncMessage::BlockProcessed { - process_type, + } + if self.network_globals.peers.read().is_connected(&peer_id) + && self.network.is_execution_engine_online() + { + self.block_lookups + .search_parent(block, peer_id, &mut self.network); + } + } + SyncMessage::UnknownBlockHash(peer_id, block_hash) => { + // If we are not synced, ignore this block. + if self.network_globals.sync_state.read().is_synced() + && self.network_globals.peers.read().is_connected(&peer_id) + && self.network.is_execution_engine_online() + { + self.block_lookups + .search_block(block_hash, peer_id, &mut self.network); + } + } + SyncMessage::Disconnect(peer_id) => { + self.peer_disconnect(&peer_id); + } + SyncMessage::RpcError { + peer_id, + request_id, + } => self.inject_error(peer_id, request_id), + SyncMessage::BlockProcessed { + process_type, + result, + } => match process_type { + BlockProcessType::SingleBlock { id } => { + self.block_lookups + .single_block_processed(id, result, &mut self.network) + } + BlockProcessType::ParentLookup { chain_hash } => self + .block_lookups + .parent_block_processed(chain_hash, result, &mut self.network), + }, + SyncMessage::BatchProcessed { sync_type, result } => match sync_type { + ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => { + self.range_sync.handle_block_process_result( + &mut self.network, + chain_id, + epoch, result, - } => match process_type { - BlockProcessType::SingleBlock { id } => self - .block_lookups - .single_block_processed(id, result, &mut self.network), - BlockProcessType::ParentLookup { chain_hash } => self - .block_lookups - 
.parent_block_processed(chain_hash, result, &mut self.network), - }, - SyncMessage::BatchProcessed { sync_type, result } => match sync_type { - ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { - self.range_sync.handle_block_process_result( - &mut self.network, - chain_id, - epoch, - result, - ); + ); + self.update_sync_state(); + } + ChainSegmentProcessId::BackSyncBatchId(epoch) => { + match self.backfill_sync.on_batch_process_result( + &mut self.network, + epoch, + &result, + ) { + Ok(ProcessResult::Successful) => {} + Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), + Err(error) => { + error!(self.log, "Backfill sync failed"; "error" => ?error); + // Update the global status self.update_sync_state(); } - ChainSegmentProcessId::BackSyncBatchId(epoch) => { - match self.backfill_sync.on_batch_process_result( - &mut self.network, - epoch, - &result, - ) { - Ok(ProcessResult::Successful) => {} - Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), - Err(error) => { - error!(self.log, "Backfill sync failed"; "error" => ?error); - // Update the global status - self.update_sync_state(); - } - } - } - ChainSegmentProcessId::ParentLookup(chain_hash) => self - .block_lookups - .parent_chain_processed(chain_hash, result, &mut self.network), - }, + } + } + ChainSegmentProcessId::ParentLookup(chain_hash) => self + .block_lookups + .parent_chain_processed(chain_hash, result, &mut self.network), + }, + } + } + + fn handle_new_execution_engine_state(&mut self, engine_state: EngineState) { + self.network.update_execution_engine_state(engine_state); + + match engine_state { + EngineState::Online => { + // Resume sync components. + + // - Block lookups: + // We start searching for blocks again. This is done by updating the stored ee online + // state. No further action required. + + // - Parent lookups: + // We start searching for parents again. This is done by updating the stored ee + // online state. No further action required. 
+ + // - Range: + // Actively resume. + self.range_sync.resume(&mut self.network); + + // - Backfill: + // Not affected by ee states, nothing to do. + } + + EngineState::Offline => { + // Pause sync components. + + // - Block lookups: + // Disabled while in this state. We drop current requests and don't search for new + // blocks. + let dropped_single_blocks_requests = + self.block_lookups.drop_single_block_requests(); + + // - Parent lookups: + // Disabled while in this state. We drop current requests and don't search for new + // blocks. + let dropped_parent_chain_requests = self.block_lookups.drop_parent_chain_requests(); + + // - Range: + // We still send found peers to range so that it can keep track of potential chains + // with respect to our current peers. Range will stop processing batches in the + // meantime. No further action from the manager is required for this. + + // - Backfill: Not affected by ee states, nothing to do. + + // Some logs. + if dropped_single_blocks_requests > 0 || dropped_parent_chain_requests > 0 { + debug!(self.log, "Execution engine not online, dropping active requests."; + "dropped_single_blocks_requests" => dropped_single_blocks_requests, + "dropped_parent_chain_requests" => dropped_parent_chain_requests, + ); } } } @@ -563,7 +646,7 @@ impl SyncManager { &mut self, request_id: RequestId, peer_id: PeerId, - beacon_block: Option>>, + beacon_block: Option>>, seen_timestamp: Duration, ) { match request_id { @@ -591,7 +674,7 @@ impl SyncManager { batch_id, &peer_id, id, - beacon_block.map(|b| *b), + beacon_block, ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), Ok(ProcessResult::Successful) => {} @@ -613,7 +696,7 @@ impl SyncManager { chain_id, batch_id, id, - beacon_block.map(|b| *b), + beacon_block, ); self.update_sync_state(); } @@ -621,3 +704,18 @@ impl SyncManager { } } } + +impl From>> for BlockProcessResult { + fn from(result: Result>) -> Self { + match result { + Ok(_) => BlockProcessResult::Ok, + Err(e) => 
e.into(), + } + } +} + +impl From> for BlockProcessResult { + fn from(e: BlockError) -> Self { + BlockProcessResult::Err(e) + } +} diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 7a891de728..dc18a5c981 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -9,4 +9,4 @@ mod peer_sync_info; mod range_sync; pub use manager::{BatchProcessResult, SyncMessage}; -pub use range_sync::ChainId; +pub use range_sync::{BatchOperationOutcome, ChainId}; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 96bdc533f8..45ade7034c 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -3,24 +3,25 @@ use super::manager::{Id, RequestId as SyncRequestId}; use super::range_sync::{BatchId, ChainId}; +use crate::beacon_processor::WorkEvent; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; +use beacon_chain::{BeaconChainTypes, EngineState}; use fnv::FnvHashMap; use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason}; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request}; use slog::{debug, trace, warn}; use std::sync::Arc; use tokio::sync::mpsc; -use types::EthSpec; /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. -pub struct SyncNetworkContext { +pub struct SyncNetworkContext { /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender>, + network_send: mpsc::UnboundedSender>, /// Access to the network global vars. - network_globals: Arc>, + network_globals: Arc>, /// A sequential ID for all RPC requests. 
request_id: Id, @@ -28,24 +29,35 @@ pub struct SyncNetworkContext { /// BlocksByRange requests made by the range syncing algorithm. range_requests: FnvHashMap, + /// BlocksByRange requests made by backfill syncing. backfill_requests: FnvHashMap, + /// Whether the ee is online. If it's not, we don't allow access to the + /// `beacon_processor_send`. + execution_engine_state: EngineState, + + /// Channel to send work to the beacon processor. + beacon_processor_send: mpsc::Sender>, + /// Logger for the `SyncNetworkContext`. log: slog::Logger, } -impl SyncNetworkContext { +impl SyncNetworkContext { pub fn new( - network_send: mpsc::UnboundedSender>, - network_globals: Arc>, + network_send: mpsc::UnboundedSender>, + network_globals: Arc>, + beacon_processor_send: mpsc::Sender>, log: slog::Logger, ) -> Self { Self { network_send, + execution_engine_state: EngineState::Online, // always assume `Online` at the start network_globals, request_id: 1, range_requests: FnvHashMap::default(), backfill_requests: FnvHashMap::default(), + beacon_processor_send, log, } } @@ -65,27 +77,26 @@ impl SyncNetworkContext { chain: &C, peers: impl Iterator, ) { - if let Ok(status_message) = chain.status_message() { - for peer_id in peers { - debug!( - self.log, - "Sending Status Request"; - "peer" => %peer_id, - "fork_digest" => ?status_message.fork_digest, - "finalized_root" => ?status_message.finalized_root, - "finalized_epoch" => ?status_message.finalized_epoch, - "head_root" => %status_message.head_root, - "head_slot" => %status_message.head_slot, - ); + let status_message = chain.status_message(); + for peer_id in peers { + debug!( + self.log, + "Sending Status Request"; + "peer" => %peer_id, + "fork_digest" => ?status_message.fork_digest, + "finalized_root" => ?status_message.finalized_root, + "finalized_epoch" => ?status_message.finalized_epoch, + "head_root" => %status_message.head_root, + "head_slot" => %status_message.head_slot, + ); - let request = 
Request::Status(status_message.clone()); - let request_id = RequestId::Router; - let _ = self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request, - request_id, - }); - } + let request = Request::Status(status_message.clone()); + let request_id = RequestId::Router; + let _ = self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request, + request_id, + }); } } @@ -212,6 +223,16 @@ impl SyncNetworkContext { Ok(id) } + pub fn is_execution_engine_online(&self) -> bool { + self.execution_engine_state == EngineState::Online + } + + pub fn update_execution_engine_state(&mut self, engine_state: EngineState) { + debug!(self.log, "Sync's view on execution engine state updated"; + "past_state" => ?self.execution_engine_state, "new_state" => ?engine_state); + self.execution_engine_state = engine_state; + } + /// Terminates the connection with the peer and bans them. pub fn goodbye_peer(&mut self, peer_id: PeerId, reason: GoodbyeReason) { self.network_send @@ -250,13 +271,22 @@ impl SyncNetworkContext { } /// Sends an arbitrary network message. 
- fn send_network_msg(&mut self, msg: NetworkMessage) -> Result<(), &'static str> { + fn send_network_msg(&mut self, msg: NetworkMessage) -> Result<(), &'static str> { self.network_send.send(msg).map_err(|_| { debug!(self.log, "Could not send message to the network service"); "Network channel send Failed" }) } + pub fn processor_channel_if_enabled(&self) -> Option<&mpsc::Sender>> { + self.is_execution_engine_online() + .then_some(&self.beacon_processor_send) + } + + pub fn processor_channel(&self) -> &mpsc::Sender> { + &self.beacon_processor_send + } + fn next_id(&mut self) -> Id { let id = self.request_id; self.request_id += 1; diff --git a/beacon_node/network/src/sync/peer_sync_info.rs b/beacon_node/network/src/sync/peer_sync_info.rs index ed3f07763c..c01366f1be 100644 --- a/beacon_node/network/src/sync/peer_sync_info.rs +++ b/beacon_node/network/src/sync/peer_sync_info.rs @@ -59,7 +59,7 @@ pub fn remote_sync_type( if remote.head_slot < near_range_start { PeerSyncType::Behind } else if remote.head_slot > near_range_end - && !chain.fork_choice.read().contains_block(&remote.head_root) + && !chain.block_is_known_to_fork_choice(&remote.head_root) { // This peer has a head ahead enough of ours and we have no knowledge of their best // block. 
@@ -74,7 +74,7 @@ pub fn remote_sync_type( if (local.finalized_epoch + 1 == remote.finalized_epoch && near_range_start <= remote.head_slot && remote.head_slot <= near_range_end) - || chain.fork_choice.read().contains_block(&remote.head_root) + || chain.block_is_known_to_fork_choice(&remote.head_root) { // This peer is near enough to us to be considered synced, or // we have already synced up to this peer's head diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 614bf57dd0..3eee7223db 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -4,6 +4,7 @@ use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; +use std::sync::Arc; use types::{Epoch, EthSpec, SignedBeaconBlock, Slot}; /// The number of times to retry a batch before it is considered failed. @@ -46,7 +47,7 @@ pub trait BatchConfig { /// Note that simpler hashing functions considered in the past (hash of first block, hash of last /// block, number of received blocks) are not good enough to differentiate attempts. For this /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync. - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64; + fn batch_attempt_hash(blocks: &[Arc>]) -> u64; } pub struct RangeSyncBatchConfig {} @@ -58,7 +59,7 @@ impl BatchConfig for RangeSyncBatchConfig { fn max_batch_processing_attempts() -> u8 { MAX_BATCH_PROCESSING_ATTEMPTS } - fn batch_attempt_hash(blocks: &[SignedBeaconBlock]) -> u64 { + fn batch_attempt_hash(blocks: &[Arc>]) -> u64 { let mut hasher = std::collections::hash_map::DefaultHasher::new(); blocks.hash(&mut hasher); hasher.finish() @@ -69,8 +70,17 @@ impl BatchConfig for RangeSyncBatchConfig { // Such errors should never be encountered. pub struct WrongState(pub(crate) String); -/// Auxiliary type alias for readability. 
-type IsFailed = bool; +/// After batch operations, we use this to communicate whether a batch can continue or not +pub enum BatchOperationOutcome { + Continue, + Failed { blacklist: bool }, +} + +pub enum BatchProcessingResult { + Success, + FaultyFailure, + NonFaultyFailure, +} /// A segment of a chain. pub struct BatchInfo { @@ -80,6 +90,8 @@ pub struct BatchInfo { end_slot: Slot, /// The `Attempts` that have been made and failed to send us this batch. failed_processing_attempts: Vec, + /// Number of processing attempts that have failed but we do not count. + non_faulty_processing_attempts: u8, /// The number of download retries this batch has undergone due to a failed request. failed_download_attempts: Vec, /// State of the batch. @@ -93,9 +105,9 @@ pub enum BatchState { /// The batch has failed either downloading or processing, but can be requested again. AwaitingDownload, /// The batch is being downloaded. - Downloading(PeerId, Vec>, Id), + Downloading(PeerId, Vec>>, Id), /// The batch has been completely downloaded and is ready for processing. - AwaitingProcessing(PeerId, Vec>), + AwaitingProcessing(PeerId, Vec>>), /// The batch is being processed. Processing(Attempt), /// The batch was successfully processed and is waiting to be validated. @@ -116,14 +128,6 @@ impl BatchState { pub fn poison(&mut self) -> BatchState { std::mem::replace(self, BatchState::Poisoned) } - - pub fn is_failed(&self) -> IsFailed { - match self { - BatchState::Failed => true, - BatchState::Poisoned => unreachable!("Poisoned batch"), - _ => false, - } - } } impl BatchInfo { @@ -143,6 +147,7 @@ impl BatchInfo { end_slot, failed_processing_attempts: Vec::new(), failed_download_attempts: Vec::new(), + non_faulty_processing_attempts: 0, state: BatchState::AwaitingDownload, marker: std::marker::PhantomData, } @@ -166,7 +171,16 @@ impl BatchInfo { peers } - /// Verifies if an incomming block belongs to this batch. 
+ /// Return the number of times this batch has failed downloading and failed processing, in this + /// order. + pub fn failed_attempts(&self) -> (usize, usize) { + ( + self.failed_download_attempts.len(), + self.failed_processing_attempts.len(), + ) + } + + /// Verifies if an incoming block belongs to this batch. pub fn is_expecting_block(&self, peer_id: &PeerId, request_id: &Id) -> bool { if let BatchState::Downloading(expected_peer, _, expected_id) = &self.state { return peer_id == expected_peer && expected_id == request_id; @@ -191,7 +205,20 @@ impl BatchInfo { BlocksByRangeRequest { start_slot: self.start_slot.into(), count: self.end_slot.sub(self.start_slot).into(), - step: 1, + } + } + + /// After different operations over a batch, this could be in a state that allows it to + /// continue, or in failed state. When the batch has failed, we check if it did mainly due to + /// processing failures. In this case the batch is considered failed and faulty. + pub fn outcome(&self) -> BatchOperationOutcome { + match self.state { + BatchState::Poisoned => unreachable!("Poisoned batch"), + BatchState::Failed => BatchOperationOutcome::Failed { + blacklist: self.failed_processing_attempts.len() + > self.failed_download_attempts.len(), + }, + _ => BatchOperationOutcome::Continue, } } @@ -204,7 +231,7 @@ impl BatchInfo { } /// Adds a block to a downloading batch. 
- pub fn add_block(&mut self, block: SignedBeaconBlock) -> Result<(), WrongState> { + pub fn add_block(&mut self, block: Arc>) -> Result<(), WrongState> { match self.state.poison() { BatchState::Downloading(peer, mut blocks, req_id) => { blocks.push(block); @@ -227,7 +254,10 @@ impl BatchInfo { #[must_use = "Batch may have failed"] pub fn download_completed( &mut self, - ) -> Result> { + ) -> Result< + usize, /* Received blocks */ + Result<(Slot, Slot, BatchOperationOutcome), WrongState>, + > { match self.state.poison() { BatchState::Downloading(peer, blocks, _request_id) => { // verify that blocks are in range @@ -256,7 +286,7 @@ impl BatchInfo { BatchState::AwaitingDownload }; - return Err(Ok((expected, received, self.state.is_failed()))); + return Err(Ok((expected, received, self.outcome()))); } } @@ -281,7 +311,10 @@ impl BatchInfo { /// THe `mark_failed` parameter, when set to false, does not increment the failed attempts of /// this batch and register the peer, rather attempts a re-download. 
#[must_use = "Batch may have failed"] - pub fn download_failed(&mut self, mark_failed: bool) -> Result { + pub fn download_failed( + &mut self, + mark_failed: bool, + ) -> Result { match self.state.poison() { BatchState::Downloading(peer, _, _request_id) => { // register the attempt and check if the batch can be tried again @@ -296,7 +329,7 @@ impl BatchInfo { // drop the blocks BatchState::AwaitingDownload }; - Ok(self.state.is_failed()) + Ok(self.outcome()) } BatchState::Poisoned => unreachable!("Poisoned batch"), other => { @@ -330,7 +363,7 @@ impl BatchInfo { } } - pub fn start_processing(&mut self) -> Result>, WrongState> { + pub fn start_processing(&mut self) -> Result>>, WrongState> { match self.state.poison() { BatchState::AwaitingProcessing(peer, blocks) => { self.state = BatchState::Processing(Attempt::new::(peer, &blocks)); @@ -348,25 +381,34 @@ impl BatchInfo { } #[must_use = "Batch may have failed"] - pub fn processing_completed(&mut self, was_sucessful: bool) -> Result { + pub fn processing_completed( + &mut self, + procesing_result: BatchProcessingResult, + ) -> Result { match self.state.poison() { BatchState::Processing(attempt) => { - self.state = if !was_sucessful { - // register the failed attempt - self.failed_processing_attempts.push(attempt); + self.state = match procesing_result { + BatchProcessingResult::Success => BatchState::AwaitingValidation(attempt), + BatchProcessingResult::FaultyFailure => { + // register the failed attempt + self.failed_processing_attempts.push(attempt); - // check if the batch can be downloaded again - if self.failed_processing_attempts.len() - >= B::max_batch_processing_attempts() as usize - { - BatchState::Failed - } else { + // check if the batch can be downloaded again + if self.failed_processing_attempts.len() + >= B::max_batch_processing_attempts() as usize + { + BatchState::Failed + } else { + BatchState::AwaitingDownload + } + } + BatchProcessingResult::NonFaultyFailure => { + 
self.non_faulty_processing_attempts = + self.non_faulty_processing_attempts.saturating_add(1); BatchState::AwaitingDownload } - } else { - BatchState::AwaitingValidation(attempt) }; - Ok(self.state.is_failed()) + Ok(self.outcome()) } BatchState::Poisoned => unreachable!("Poisoned batch"), other => { @@ -380,7 +422,7 @@ impl BatchInfo { } #[must_use = "Batch may have failed"] - pub fn validation_failed(&mut self) -> Result { + pub fn validation_failed(&mut self) -> Result { match self.state.poison() { BatchState::AwaitingValidation(attempt) => { self.failed_processing_attempts.push(attempt); @@ -393,7 +435,7 @@ impl BatchInfo { } else { BatchState::AwaitingDownload }; - Ok(self.state.is_failed()) + Ok(self.outcome()) } BatchState::Poisoned => unreachable!("Poisoned batch"), other => { @@ -419,7 +461,10 @@ pub struct Attempt { } impl Attempt { - fn new(peer_id: PeerId, blocks: &[SignedBeaconBlock]) -> Self { + fn new( + peer_id: PeerId, + blocks: &[Arc>], + ) -> Self { let hash = B::batch_attempt_hash(blocks); Attempt { peer_id, hash } } @@ -451,6 +496,7 @@ impl slog::KV for BatchInfo { )?; serializer.emit_usize("downloaded", self.failed_download_attempts.len())?; serializer.emit_usize("processed", self.failed_processing_attempts.len())?; + serializer.emit_u8("processed_no_penalty", self.non_faulty_processing_attempts)?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; slog::Result::Ok(()) } diff --git a/beacon_node/network/src/sync/range_sync/block_storage.rs b/beacon_node/network/src/sync/range_sync/block_storage.rs index 5f8033bc51..df49543a6b 100644 --- a/beacon_node/network/src/sync/range_sync/block_storage.rs +++ b/beacon_node/network/src/sync/range_sync/block_storage.rs @@ -8,6 +8,6 @@ pub trait BlockStorage { impl BlockStorage for BeaconChain { fn is_block_known(&self, block_root: &Hash256) -> bool { - self.fork_choice.read().contains_block(block_root) + self.block_is_known_to_fork_choice(block_root) } } diff --git 
a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 9f4142dd66..4226b600f5 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1,15 +1,16 @@ -use super::batch::{BatchInfo, BatchState}; -use crate::beacon_processor::ChainSegmentProcessId; -use crate::beacon_processor::WorkEvent as BeaconWorkEvent; -use crate::sync::{manager::Id, network_context::SyncNetworkContext, BatchProcessResult}; -use beacon_chain::BeaconChainTypes; +use super::batch::{BatchInfo, BatchProcessingResult, BatchState}; +use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent}; +use crate::sync::{ + manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, +}; +use beacon_chain::{BeaconChainTypes, CountUnrealized}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; use rand::seq::SliceRandom; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; -use tokio::sync::mpsc::Sender; +use std::sync::Arc; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of @@ -36,7 +37,11 @@ pub type ProcessingResult = Result; pub enum RemoveChain { EmptyPeerPool, ChainCompleted, - ChainFailed(BatchId), + /// A chain has failed. This boolean signals whether the chain should be blacklisted. + ChainFailed { + blacklist: bool, + failing_batch: BatchId, + }, WrongBatchState(String), WrongChainState(String), } @@ -96,8 +101,7 @@ pub struct SyncingChain { /// Batches validated by this chain. validated_batches: u64, - /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: Sender>, + is_finalized_segment: bool, /// The chain's log. 
log: slog::Logger, @@ -124,7 +128,7 @@ impl SyncingChain { target_head_slot: Slot, target_head_root: Hash256, peer_id: PeerId, - beacon_processor_send: Sender>, + is_finalized_segment: bool, log: &slog::Logger, ) -> Self { let mut peers = FnvHashMap::default(); @@ -146,7 +150,7 @@ impl SyncingChain { state: ChainSyncingState::Stopped, current_processing_batch: None, validated_batches: 0, - beacon_processor_send, + is_finalized_segment, log: log.new(o!("chain" => id)), } } @@ -176,14 +180,19 @@ impl SyncingChain { pub fn remove_peer( &mut self, peer_id: &PeerId, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, ) -> ProcessingResult { if let Some(batch_ids) = self.peers.remove(peer_id) { // fail the batches for id in batch_ids { if let Some(batch) = self.batches.get_mut(&id) { - if batch.download_failed(true)? { - return Err(RemoveChain::ChainFailed(id)); + if let BatchOperationOutcome::Failed { blacklist } = + batch.download_failed(true)? + { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: id, + }); } self.retry_batch_download(network, id)?; } else { @@ -212,11 +221,11 @@ impl SyncingChain { /// If the block correctly completes the batch it will be processed if possible. 
pub fn on_block_response( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, peer_id: &PeerId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) -> ProcessingResult { // check if we have this batch let batch = match self.batches.get_mut(&batch_id) { @@ -260,12 +269,15 @@ impl SyncingChain { self.process_completed_batches(network) } Err(result) => { - let (expected_boundary, received_boundary, is_failed) = result?; + let (expected_boundary, received_boundary, outcome) = result?; warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, "peer_id" => %peer_id, batch); - if is_failed { - return Err(RemoveChain::ChainFailed(batch_id)); + if let BatchOperationOutcome::Failed { blacklist } = outcome { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }); } // this batch can't be used, so we need to request it again. self.retry_batch_download(network, batch_id) @@ -278,7 +290,7 @@ impl SyncingChain { /// The batch must exist and be ready for processing fn process_batch( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, ) -> ProcessingResult { // Only process batches if this chain is Syncing, and only one at a time @@ -286,6 +298,11 @@ impl SyncingChain { return Ok(KeepChain); } + let beacon_processor_send = match network.processor_channel_if_enabled() { + Some(channel) => channel, + None => return Ok(KeepChain), + }; + let batch = match self.batches.get_mut(&batch_id) { Some(batch) => batch, None => { @@ -301,12 +318,16 @@ impl SyncingChain { // for removing chains and checking completion is in the callback. 
let blocks = batch.start_processing()?; - let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id); + let count_unrealized = if self.is_finalized_segment { + CountUnrealized::False + } else { + CountUnrealized::True + }; + let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized); self.current_processing_batch = Some(batch_id); - if let Err(e) = self - .beacon_processor_send - .try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) + if let Err(e) = + beacon_processor_send.try_send(BeaconWorkEvent::chain_segment(process_id, blocks)) { crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch", "error" => %e, "batch" => self.processing_target); @@ -314,14 +335,7 @@ impl SyncingChain { // blocks to continue, and the chain is expecting a processing result that won't // arrive. To mitigate this, (fake) fail this processing so that the batch is // re-downloaded. - self.on_batch_process_result( - network, - batch_id, - &BatchProcessResult::Failed { - imported_blocks: false, - peer_action: None, - }, - ) + self.on_batch_process_result(network, batch_id, &BatchProcessResult::NonFaultyFailure) } else { Ok(KeepChain) } @@ -330,7 +344,7 @@ impl SyncingChain { /// Processes the next ready batch, prioritizing optimistic batches over the processing target. fn process_completed_batches( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, ) -> ProcessingResult { // Only process batches if this chain is Syncing and only process one batch at a time if self.state != ChainSyncingState::Syncing || self.current_processing_batch.is_some() { @@ -431,13 +445,13 @@ impl SyncingChain { /// of the batch processor. 
pub fn on_batch_process_result( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, result: &BatchProcessResult, ) -> ProcessingResult { // the first two cases are possible if the chain advances while waiting for a processing // result - match &self.current_processing_batch { + let batch = match &self.current_processing_batch { Some(processing_id) if *processing_id != batch_id => { debug!(self.log, "Unexpected batch result"; "batch_epoch" => batch_id, "expected_batch_epoch" => processing_id); @@ -451,22 +465,35 @@ impl SyncingChain { _ => { // batch_id matches, continue self.current_processing_batch = None; - } - } - - match result { - BatchProcessResult::Success(was_non_empty) => { - let batch = self.batches.get_mut(&batch_id).ok_or_else(|| { + self.batches.get_mut(&batch_id).ok_or_else(|| { RemoveChain::WrongChainState(format!( "Current processing batch not found: {}", batch_id )) - })?; + })? + } + }; + + let peer = batch.current_peer().cloned().ok_or_else(|| { + RemoveChain::WrongBatchState(format!( + "Processing target is in wrong state: {:?}", + batch.state(), + )) + })?; + + // Log the process result and the batch for debugging purposes. + debug!(self.log, "Batch processing result"; "result" => ?result, &batch, + "batch_epoch" => batch_id, "client" => %network.client_type(&peer)); + + // We consider three cases. Batch was successfully processed, Batch failed processing due + // to a faulty peer, or batch failed processing but the peer can't be deemed faulty. + match result { + BatchProcessResult::Success { was_non_empty } => { + batch.processing_completed(BatchProcessingResult::Success)?; - batch.processing_completed(true)?; - // If the processed batch was not empty, we can validate previous unvalidated - // blocks. if *was_non_empty { + // If the processed batch was not empty, we can validate previous unvalidated + // blocks. 
self.advance_chain(network, batch_id); // we register so that on chain switching we don't try it again self.attempted_optimistic_starts.insert(batch_id); @@ -496,66 +523,62 @@ impl SyncingChain { self.process_completed_batches(network) } } - BatchProcessResult::Failed { + BatchProcessResult::FaultyFailure { imported_blocks, - peer_action, + penalty, } => { - let batch = self.batches.get_mut(&batch_id).ok_or_else(|| { - RemoveChain::WrongChainState(format!( - "Batch not found for current processing target {}", - batch_id - )) - })?; - let peer = batch.current_peer().cloned().ok_or_else(|| { - RemoveChain::WrongBatchState(format!( - "Processing target is in wrong state: {:?}", - batch.state(), - )) - })?; - debug!(self.log, "Batch processing failed"; "imported_blocks" => imported_blocks, - "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); - if batch.processing_completed(false)? { - // check that we have not exceeded the re-process retry counter - // If a batch has exceeded the invalid batch lookup attempts limit, it means - // that it is likely all peers in this chain are are sending invalid batches - // repeatedly and are either malicious or faulty. We drop the chain and - // report all peers. - // There are some edge cases with forks that could land us in this situation. - // This should be unlikely, so we tolerate these errors, but not often. - warn!( - self.log, - "Batch failed to download. Dropping chain scoring peers"; - "score_adjustment" => %peer_action - .as_ref() - .map(ToString::to_string) - .unwrap_or_else(|| "None".into()), - "batch_epoch"=> batch_id - ); + // Penalize the peer appropriately. + network.report_peer(peer, *penalty, "faulty_batch"); - if let Some(peer_action) = peer_action { - for (peer, _) in self.peers.drain() { - network.report_peer(peer, *peer_action, "batch_failed"); + // Check if this batch is allowed to continue + match batch.processing_completed(BatchProcessingResult::FaultyFailure)?
{ + BatchOperationOutcome::Continue => { + // Chain can continue. Check if it can be moved forward. + if *imported_blocks { + // At least one block was successfully verified and imported, so we can be sure all + // previous batches are valid and we only need to download the current failed + // batch. + self.advance_chain(network, batch_id); } + // Handle this invalid batch, that is within the re-process retries limit. + self.handle_invalid_batch(network, batch_id) } - Err(RemoveChain::ChainFailed(batch_id)) - } else { - // chain can continue. Check if it can be moved forward - if *imported_blocks { - // At least one block was successfully verified and imported, so we can be sure all - // previous batches are valid and we only need to download the current failed - // batch. - self.advance_chain(network, batch_id); + BatchOperationOutcome::Failed { blacklist } => { + // Check that we have not exceeded the re-process retry counter. + // If a batch has exceeded the invalid batch lookup attempts limit, it means + // that it is likely all peers in this chain are sending invalid batches + // repeatedly and are either malicious or faulty. We drop the chain and + // report all peers. + // There are some edge cases with forks that could land us in this situation. + // This should be unlikely, so we tolerate these errors, but not often. + warn!( + self.log, + "Batch failed to download. Dropping chain scoring peers"; + "score_adjustment" => %penalty, + "batch_epoch"=> batch_id, + ); + + for (peer, _) in self.peers.drain() { + network.report_peer(peer, *penalty, "faulty_chain"); + } + Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }) } - // Handle this invalid batch, that is within the re-process retries limit. - self.handle_invalid_batch(network, batch_id) } } + BatchProcessResult::NonFaultyFailure => { + batch.processing_completed(BatchProcessingResult::NonFaultyFailure)?; + // Simply redownload the batch.
+ self.retry_batch_download(network, batch_id) + } } } fn reject_optimistic_batch( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, redownload: bool, reason: &str, ) -> ProcessingResult { @@ -586,11 +609,7 @@ impl SyncingChain { /// /// If a previous batch has been validated and it had been re-processed, penalize the original /// peer. - fn advance_chain( - &mut self, - network: &mut SyncNetworkContext, - validating_epoch: Epoch, - ) { + fn advance_chain(&mut self, network: &mut SyncNetworkContext, validating_epoch: Epoch) { // make sure this epoch produces an advancement if validating_epoch <= self.start_epoch { return; @@ -694,7 +713,7 @@ impl SyncingChain { /// intended and can result in downvoting a peer. fn handle_invalid_batch( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, ) -> ProcessingResult { // The current batch could not be processed, indicating either the current or previous @@ -722,9 +741,12 @@ impl SyncingChain { let mut redownload_queue = Vec::new(); for (id, batch) in self.batches.range_mut(..batch_id) { - if batch.validation_failed()? { + if let BatchOperationOutcome::Failed { blacklist } = batch.validation_failed()? { // remove the chain early - return Err(RemoveChain::ChainFailed(batch_id)); + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: *id, + }); } redownload_queue.push(*id); } @@ -750,7 +772,7 @@ impl SyncingChain { /// This could be new chain, or an old chain that is being resumed. pub fn start_syncing( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, local_finalized_epoch: Epoch, optimistic_start_epoch: Epoch, ) -> ProcessingResult { @@ -788,7 +810,7 @@ impl SyncingChain { /// If the chain is active, this starts requesting batches from this peer. 
pub fn add_peer( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, peer_id: PeerId, ) -> ProcessingResult { // add the peer without overwriting its active requests @@ -805,7 +827,7 @@ impl SyncingChain { /// If the batch exists it is re-requested. pub fn inject_error( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, peer_id: &PeerId, request_id: Id, @@ -821,8 +843,11 @@ impl SyncingChain { if let Some(active_requests) = self.peers.get_mut(peer_id) { active_requests.remove(&batch_id); } - if batch.download_failed(true)? { - return Err(RemoveChain::ChainFailed(batch_id)); + if let BatchOperationOutcome::Failed { blacklist } = batch.download_failed(true)? { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }); } self.retry_batch_download(network, batch_id) } else { @@ -834,7 +859,7 @@ impl SyncingChain { /// Sends and registers the request of a batch awaiting download. pub fn retry_batch_download( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, ) -> ProcessingResult { let batch = match self.batches.get_mut(&batch_id) { @@ -867,7 +892,7 @@ impl SyncingChain { /// Requests the batch assigned to the given id from a given peer. pub fn send_batch( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch_id: BatchId, peer: PeerId, ) -> ProcessingResult { @@ -910,10 +935,16 @@ impl SyncingChain { self.peers .get_mut(&peer) .map(|request| request.remove(&batch_id)); - if batch.download_failed(true)? { - return Err(RemoveChain::ChainFailed(batch_id)); - } else { - return self.retry_batch_download(network, batch_id); + match batch.download_failed(true)? 
{ + BatchOperationOutcome::Failed { blacklist } => { + return Err(RemoveChain::ChainFailed { + blacklist, + failing_batch: batch_id, + }) + } + BatchOperationOutcome::Continue => { + return self.retry_batch_download(network, batch_id) + } } } } @@ -930,12 +961,21 @@ impl SyncingChain { } } + /// Kickstarts the chain by sending for processing batches that are ready and requesting more + /// batches if needed. + pub fn resume( + &mut self, + network: &mut SyncNetworkContext, + ) -> Result { + // Request more batches if needed. + self.request_batches(network)?; + // If there is any batch ready for processing, send it. + self.process_completed_batches(network) + } + /// Attempts to request the next required batches from the peer pool if the chain is syncing. It will exhaust the peer /// pool and left over batches until the batch buffer is reached or all peers are exhausted. - fn request_batches( - &mut self, - network: &mut SyncNetworkContext, - ) -> ProcessingResult { + fn request_batches(&mut self, network: &mut SyncNetworkContext) -> ProcessingResult { if !matches!(self.state, ChainSyncingState::Syncing) { return Ok(KeepChain); } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 512f7a989a..37a3f13e73 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -6,7 +6,6 @@ use super::block_storage::BlockStorage; use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; -use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::metrics; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::BeaconChainTypes; @@ -18,7 +17,6 @@ use smallvec::SmallVec; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::Arc; -use tokio::sync::mpsc; use types::EthSpec; use types::{Epoch, Hash256, Slot}; 
@@ -193,10 +191,9 @@ impl ChainCollection { /// do so. pub fn update( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, local: &SyncInfo, awaiting_head_peers: &mut HashMap, - beacon_processor_send: &mpsc::Sender>, ) { // Remove any outdated finalized/head chains self.purge_outdated_chains(local, awaiting_head_peers); @@ -212,7 +209,6 @@ impl ChainCollection { local.finalized_epoch, local_head_epoch, awaiting_head_peers, - beacon_processor_send, ); } } @@ -257,7 +253,7 @@ impl ChainCollection { /// or not. fn update_finalized_chains( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, local_epoch: Epoch, local_head_epoch: Epoch, ) { @@ -326,11 +322,10 @@ impl ChainCollection { /// Start syncing any head chains if required. fn update_head_chains( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, local_epoch: Epoch, local_head_epoch: Epoch, awaiting_head_peers: &mut HashMap, - beacon_processor_send: &mpsc::Sender>, ) { // Include the awaiting head peers for (peer_id, peer_sync_info) in awaiting_head_peers.drain() { @@ -341,7 +336,6 @@ impl ChainCollection { peer_sync_info.head_slot, peer_id, RangeSyncType::Head, - beacon_processor_send, network, ); } @@ -407,7 +401,6 @@ impl ChainCollection { local_info: &SyncInfo, awaiting_head_peers: &mut HashMap, ) { - debug!(self.log, "Purging chains"); let local_finalized_slot = local_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -416,10 +409,7 @@ impl ChainCollection { let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { - let is = - target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root); - debug!(log_ref, "Chain is outdated {}", is); - is + target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root) }; // Retain only head peers that remain relevant @@ -472,14 +462,13 @@ impl ChainCollection { target_head_slot: Slot, peer: 
PeerId, sync_type: RangeSyncType, - beacon_processor_send: &mpsc::Sender>, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, ) { let id = SyncingChain::::id(&target_head_root, &target_head_slot); - let collection = if let RangeSyncType::Finalized = sync_type { - &mut self.finalized_chains + let (collection, is_finalized) = if let RangeSyncType::Finalized = sync_type { + (&mut self.finalized_chains, true) } else { - &mut self.head_chains + (&mut self.head_chains, false) }; match collection.entry(id) { Entry::Occupied(mut entry) => { @@ -504,7 +493,7 @@ impl ChainCollection { target_head_slot, target_head_root, peer, - beacon_processor_send.clone(), + is_finalized, &self.log, ); debug_assert_eq!(new_chain.get_id(), id); diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index b4a27c23c7..f4db32bc96 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -8,7 +8,7 @@ mod chain_collection; mod range; mod sync_type; -pub use batch::{BatchConfig, BatchInfo, BatchState}; +pub use batch::{BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState}; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; pub use range::RangeSync; pub use sync_type::RangeSyncType; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 9953df81d0..2531454387 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -43,7 +43,6 @@ use super::block_storage::BlockStorage; use super::chain::{BatchId, ChainId, RemoveChain, SyncingChain}; use super::chain_collection::ChainCollection; use super::sync_type::RangeSyncType; -use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::status::ToStatusMessage; use crate::sync::manager::Id; use crate::sync::network_context::SyncNetworkContext; @@ -53,10 +52,9 @@ use 
lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; use lru_cache::LRUTimeCache; -use slog::{crit, debug, error, trace, warn}; +use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use tokio::sync::mpsc; use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// For how long we store failed finalized chains to prevent retries. @@ -76,8 +74,6 @@ pub struct RangeSync> { chains: ChainCollection, /// Chains that have failed and are stored to prevent being retried. failed_chains: LRUTimeCache, - /// A multi-threaded, non-blocking processor for applying messages to the beacon chain. - beacon_processor_send: mpsc::Sender>, /// The syncing logger. log: slog::Logger, } @@ -87,11 +83,7 @@ where C: BlockStorage + ToStatusMessage, T: BeaconChainTypes, { - pub fn new( - beacon_chain: Arc, - beacon_processor_send: mpsc::Sender>, - log: slog::Logger, - ) -> Self { + pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), chains: ChainCollection::new(beacon_chain, log.clone()), @@ -99,7 +91,6 @@ where FAILED_CHAINS_EXPIRY_SECONDS, )), awaiting_head_peers: HashMap::new(), - beacon_processor_send, log, } } @@ -117,7 +108,7 @@ where /// prioritised by peer-pool size. 
pub fn add_peer( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, local_info: SyncInfo, peer_id: PeerId, remote_info: SyncInfo, @@ -159,16 +150,11 @@ where remote_finalized_slot, peer_id, RangeSyncType::Finalized, - &self.beacon_processor_send, network, ); - self.chains.update( - network, - &local_info, - &mut self.awaiting_head_peers, - &self.beacon_processor_send, - ); + self.chains + .update(network, &local_info, &mut self.awaiting_head_peers); } RangeSyncType::Head => { // This peer requires a head chain sync @@ -197,15 +183,10 @@ where remote_info.head_slot, peer_id, RangeSyncType::Head, - &self.beacon_processor_send, network, ); - self.chains.update( - network, - &local_info, - &mut self.awaiting_head_peers, - &self.beacon_processor_send, - ); + self.chains + .update(network, &local_info, &mut self.awaiting_head_peers); } } } @@ -216,12 +197,12 @@ where /// This request could complete a chain or simply add to its progress. pub fn blocks_by_range_response( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, peer_id: PeerId, chain_id: ChainId, batch_id: BatchId, request_id: Id, - beacon_block: Option>, + beacon_block: Option>>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { @@ -246,7 +227,7 @@ where pub fn handle_block_process_result( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, chain_id: ChainId, batch_id: Epoch, result: BatchProcessResult, @@ -276,11 +257,7 @@ where /// A peer has disconnected. This removes the peer from any ongoing chains and mappings. 
A /// disconnected peer could remove a chain - pub fn peer_disconnect( - &mut self, - network: &mut SyncNetworkContext, - peer_id: &PeerId, - ) { + pub fn peer_disconnect(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { // if the peer is in the awaiting head mapping, remove it self.awaiting_head_peers.remove(peer_id); @@ -292,7 +269,7 @@ where /// which pool the peer is in. The chain may also have a batch or batches awaiting /// for this peer. If so we mark the batch as failed. The batch may then hit it's maximum /// retries. In this case, we need to remove the chain. - fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { + fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { for (removed_chain, sync_type, remove_reason) in self .chains .call_all(|chain| chain.remove_peer(peer_id, network)) @@ -304,8 +281,6 @@ where network, "peer removed", ); - - // update the state of the collection } } @@ -315,7 +290,7 @@ where /// been too many failed attempts for the batch, remove the chain. pub fn inject_error( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, peer_id: PeerId, batch_id: BatchId, chain_id: ChainId, @@ -347,7 +322,7 @@ where chain: SyncingChain, sync_type: RangeSyncType, remove_reason: RemoveChain, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, op: &'static str, ) { if remove_reason.is_critical() { @@ -356,8 +331,8 @@ where debug!(self.log, "Chain removed"; "sync_type" => ?sync_type, &chain, "reason" => ?remove_reason, "op" => op); } - if let RemoveChain::ChainFailed(_) = remove_reason { - if RangeSyncType::Finalized == sync_type { + if let RemoveChain::ChainFailed { blacklist, .. } = remove_reason { + if RangeSyncType::Finalized == sync_type && blacklist { warn!(self.log, "Chain failed! 
Syncing to its head won't be retried for at least the next {} seconds", FAILED_CHAINS_EXPIRY_SECONDS; &chain); self.failed_chains.insert(chain.target_head_root); } @@ -365,26 +340,32 @@ where network.status_peers(self.beacon_chain.as_ref(), chain.peers()); - let local = match self.beacon_chain.status_message() { - Ok(status) => SyncInfo { - head_slot: status.head_slot, - head_root: status.head_root, - finalized_epoch: status.finalized_epoch, - finalized_root: status.finalized_root, - }, - Err(e) => { - return error!(self.log, "Failed to get peer sync info"; - "msg" => "likely due to head lock contention", "err" => ?e) - } + let status = self.beacon_chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, }; // update the state of the collection - self.chains.update( - network, - &local, - &mut self.awaiting_head_peers, - &self.beacon_processor_send, - ); + self.chains + .update(network, &local, &mut self.awaiting_head_peers); + } + + /// Kickstarts sync. 
+ pub fn resume(&mut self, network: &mut SyncNetworkContext) { + for (removed_chain, sync_type, remove_reason) in + self.chains.call_all(|chain| chain.resume(network)) + { + self.on_chain_removed( + removed_chain, + sync_type, + remove_reason, + network, + "chain resumed", + ); + } } } @@ -394,13 +375,16 @@ mod tests { use crate::NetworkMessage; use super::*; + use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use beacon_chain::builder::Witness; use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::parking_lot::RwLock; + use beacon_chain::EngineState; use lighthouse_network::rpc::BlocksByRangeRequest; use lighthouse_network::Request; use lighthouse_network::{rpc::StatusMessage, NetworkGlobals}; use slog::{o, Drain}; + use tokio::sync::mpsc; use slot_clock::SystemTimeSlotClock; use std::collections::HashSet; @@ -447,8 +431,8 @@ mod tests { } impl ToStatusMessage for FakeStorage { - fn status_message(&self) -> Result { - Ok(self.status.read().clone()) + fn status_message(&self) -> StatusMessage { + self.status.read().clone() } } @@ -475,7 +459,7 @@ mod tests { /// To set up different scenarios where sync is told about known/unkown blocks. chain: Arc, /// Needed by range to handle communication with the network. - cx: SyncNetworkContext, + cx: SyncNetworkContext, /// To check what the network receives from Range. network_rx: mpsc::UnboundedReceiver>, /// To modify what the network declares about various global variables, in particular about @@ -521,12 +505,13 @@ mod tests { } /// Reads an BlocksByRange request to a given peer from the network receiver channel. 
+ #[track_caller] fn grab_request(&mut self, expected_peer: &PeerId) -> (RequestId, BlocksByRangeRequest) { - if let Some(NetworkMessage::SendRequest { + if let Ok(NetworkMessage::SendRequest { peer_id, request: Request::BlocksByRange(request), request_id, - }) = self.network_rx.blocking_recv() + }) = self.network_rx.try_recv() { assert_eq!(&peer_id, expected_peer); (request_id, request) @@ -580,6 +565,29 @@ mod tests { let peer_id = PeerId::random(); (peer_id, local_info, remote_info) } + + #[track_caller] + fn expect_empty_processor(&mut self) { + match self.beacon_processor_rx.try_recv() { + Ok(work) => { + panic!("Expected empty processor. Instead got {}", work.work_type()); + } + Err(e) => match e { + mpsc::error::TryRecvError::Empty => {} + mpsc::error::TryRecvError::Disconnected => unreachable!("bad coded test?"), + }, + } + } + + #[track_caller] + fn expect_chain_segment(&mut self) { + match self.beacon_processor_rx.try_recv() { + Ok(work) => { + assert_eq!(work.work_type(), crate::beacon_processor::CHAIN_SEGMENT); + } + other => panic!("Expected chain segment process, found {:?}", other), + } + } } fn range(log_enabled: bool) -> (TestRig, RangeSync) { @@ -588,7 +596,6 @@ mod tests { let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(10); let range_sync = RangeSync::::new( chain.clone(), - beacon_processor_tx, log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); @@ -596,6 +603,7 @@ mod tests { let cx = SyncNetworkContext::new( network_tx, globals.clone(), + beacon_processor_tx, log.new(o!("component" => "network_context")), ); let test_rig = TestRig { @@ -666,4 +674,53 @@ mod tests { let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); } + + #[test] + fn pause_and_resume_on_ee_offline() { + let (mut rig, mut range) = range(true); + + // add some peers + let (peer1, local_info, head_info) = rig.head_peer(); + 
range.add_peer(&mut rig.cx, local_info, peer1, head_info); + let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 { + RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { + (rig.cx.range_sync_response(id, true).unwrap(), id) + } + other => panic!("unexpected request {:?}", other), + }; + + // make the ee offline + rig.cx.update_execution_engine_state(EngineState::Offline); + + // send the response to the request + range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, None); + + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + + // while the ee is offline, more peers might arrive. Add a new finalized peer. + let (peer2, local_info, finalized_info) = rig.finalized_peer(); + range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); + let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 { + RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => { + (rig.cx.range_sync_response(id, true).unwrap(), id) + } + other => panic!("unexpected request {:?}", other), + }; + + // send the response to the request + range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, None); + + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + + // make the beacon processor available again. + rig.cx.update_execution_engine_state(EngineState::Online); + + // now resume range, we should have two processing requests in the beacon processor. 
+ range.resume(&mut rig.cx); + + rig.expect_chain_segment(); + rig.expect_chain_segment(); + } } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 84d23a4562..1d67ecdccc 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -18,6 +18,9 @@ rayon = "1.5.0" serde = "1.0.116" serde_derive = "1.0.116" store = { path = "../store" } +bitvec = "1" [dev-dependencies] beacon_chain = { path = "../beacon_chain" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } +maplit = "1.0.2" diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 9d9be8ef35..dc57a20a2c 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -1,5 +1,6 @@ +use crate::attestation_storage::AttestationRef; use crate::max_cover::MaxCover; -use crate::RewardCache; +use crate::reward_cache::RewardCache; use state_processing::common::{ altair, base, get_attestation_participation_flag_indices, get_attesting_indices, }; @@ -13,14 +14,14 @@ use types::{ #[derive(Debug, Clone)] pub struct AttMaxCover<'a, T: EthSpec> { /// Underlying attestation. - pub att: &'a Attestation, + pub att: AttestationRef<'a, T>, /// Mapping of validator indices and their rewards. pub fresh_validators_rewards: HashMap, } impl<'a, T: EthSpec> AttMaxCover<'a, T> { pub fn new( - att: &'a Attestation, + att: AttestationRef<'a, T>, state: &BeaconState, reward_cache: &'a RewardCache, total_active_balance: u64, @@ -35,13 +36,13 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { /// Initialise an attestation cover object for base/phase0 hard fork. 
pub fn new_for_base( - att: &'a Attestation, + att: AttestationRef<'a, T>, state: &BeaconState, base_state: &BeaconStateBase, total_active_balance: u64, spec: &ChainSpec, ) -> Option { - let fresh_validators = earliest_attestation_validators(att, state, base_state); + let fresh_validators = earliest_attestation_validators(&att, state, base_state); let committee = state .get_beacon_committee(att.data.slot, att.data.index) .ok()?; @@ -69,41 +70,44 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { /// Initialise an attestation cover object for Altair or later. pub fn new_for_altair( - att: &'a Attestation, + att: AttestationRef<'a, T>, state: &BeaconState, reward_cache: &'a RewardCache, total_active_balance: u64, spec: &ChainSpec, ) -> Option { - // FIXME(sproul): could optimise out `get_attesting_indices` and allocations by storing - // these. - let committee = state - .get_beacon_committee(att.data.slot, att.data.index) - .ok()?; - let attesting_indices = - get_attesting_indices::(committee.committee, &att.aggregation_bits).ok()?; + let att_data = att.attestation_data(); - let inclusion_delay = state.slot().as_u64().checked_sub(att.data.slot.as_u64())?; + let inclusion_delay = state.slot().as_u64().checked_sub(att_data.slot.as_u64())?; let att_participation_flags = - get_attestation_participation_flag_indices(state, &att.data, inclusion_delay, spec) + get_attestation_participation_flag_indices(state, &att_data, inclusion_delay, spec) .ok()?; + let base_reward_per_increment = + altair::BaseRewardPerIncrement::new(total_active_balance, spec).ok()?; - let fresh_validators_rewards = attesting_indices + let fresh_validators_rewards = att + .indexed + .attesting_indices .iter() .filter_map(|&index| { - let mut proposer_reward_numerator = 0; - let participation = reward_cache - .get_epoch_participation(index, att.data.target.epoch) - .ok()??; + if reward_cache + .has_attested_in_epoch(index, att_data.target.epoch) + .ok()? 
+ { + return None; + } - let effective_balance = reward_cache.get_effective_balance(index)?; + let mut proposer_reward_numerator = 0; + + // FIXME(sproul): store base_reward in reward cache + // let effective_balance = reward_cache.get_effective_balance(index)?; + let effective_balance = state.get_effective_balance(index as usize).ok()?; let base_reward = - altair::get_base_reward(effective_balance, total_active_balance, spec).ok()?; + altair::get_base_reward(effective_balance, base_reward_per_increment, spec) + .ok()?; for (flag_index, weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { - if att_participation_flags.contains(&flag_index) - && !participation.has_flag(flag_index).ok()? - { + if att_participation_flags.contains(&flag_index) { proposer_reward_numerator += base_reward.checked_mul(*weight)?; } } @@ -111,7 +115,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { let proposer_reward = proposer_reward_numerator .checked_div(WEIGHT_DENOMINATOR.checked_mul(spec.proposer_reward_quotient)?)?; - Some((index as u64, proposer_reward)).filter(|_| proposer_reward != 0) + Some((index, proposer_reward)).filter(|_| proposer_reward != 0) }) .collect(); @@ -124,10 +128,15 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { type Object = Attestation; + type Intermediate = AttestationRef<'a, T>; type Set = HashMap; - fn object(&self) -> &Attestation { - self.att + fn intermediate(&self) -> &AttestationRef<'a, T> { + &self.att + } + + fn convert_to_object(att_ref: &AttestationRef<'a, T>) -> Attestation { + att_ref.clone_as_attestation() } fn covering_set(&self) -> &HashMap { @@ -146,7 +155,7 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { /// of slashable voting, which is rare. 
fn update_covering_set( &mut self, - best_att: &Attestation, + best_att: &AttestationRef<'a, T>, covered_validators: &HashMap, ) { if self.att.data.slot == best_att.data.slot && self.att.data.index == best_att.data.index { @@ -170,16 +179,16 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { /// /// This isn't optimal, but with the Altair fork this code is obsolete and not worth upgrading. pub fn earliest_attestation_validators( - attestation: &Attestation, + attestation: &AttestationRef, state: &BeaconState, base_state: &BeaconStateBase, ) -> BitList { // Bitfield of validators whose attestations are new/fresh. - let mut new_validators = attestation.aggregation_bits.clone(); + let mut new_validators = attestation.indexed.aggregation_bits.clone(); - let state_attestations = if attestation.data.target.epoch == state.current_epoch() { + let state_attestations = if attestation.checkpoint.target_epoch == state.current_epoch() { &base_state.current_epoch_attestations - } else if attestation.data.target.epoch == state.previous_epoch() { + } else if attestation.checkpoint.target_epoch == state.previous_epoch() { &base_state.previous_epoch_attestations } else { return BitList::with_capacity(0).unwrap(); diff --git a/beacon_node/operation_pool/src/attestation_id.rs b/beacon_node/operation_pool/src/attestation_id.rs index f496ecb3a3..b65975787e 100644 --- a/beacon_node/operation_pool/src/attestation_id.rs +++ b/beacon_node/operation_pool/src/attestation_id.rs @@ -1,45 +1,12 @@ use serde_derive::{Deserialize, Serialize}; -use ssz::ssz_encode; use ssz_derive::{Decode, Encode}; -use types::{AttestationData, ChainSpec, Domain, Epoch, Fork, Hash256}; /// Serialized `AttestationData` augmented with a domain to encode the fork info. +/// +/// [DEPRECATED] To be removed once all nodes have updated to schema v12. 
#[derive( PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode, Serialize, Deserialize, )] pub struct AttestationId { v: Vec, } - -/// Number of domain bytes that the end of an attestation ID is padded with. -const DOMAIN_BYTES_LEN: usize = std::mem::size_of::(); - -impl AttestationId { - pub fn from_data( - attestation: &AttestationData, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> Self { - let mut bytes = ssz_encode(attestation); - let epoch = attestation.target.epoch; - bytes.extend_from_slice( - AttestationId::compute_domain_bytes(epoch, fork, genesis_validators_root, spec) - .as_bytes(), - ); - AttestationId { v: bytes } - } - - pub fn compute_domain_bytes( - epoch: Epoch, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> Hash256 { - spec.get_domain(epoch, Domain::BeaconAttester, fork, genesis_validators_root) - } - - pub fn domain_bytes_match(&self, domain_bytes: &Hash256) -> bool { - &self.v[self.v.len() - DOMAIN_BYTES_LEN..] 
== domain_bytes.as_bytes() - } -} diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs new file mode 100644 index 0000000000..0fb9bafd82 --- /dev/null +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -0,0 +1,245 @@ +use crate::AttestationStats; +use itertools::Itertools; +use std::collections::HashMap; +use types::{ + AggregateSignature, Attestation, AttestationData, BeaconState, BitList, Checkpoint, Epoch, + EthSpec, Hash256, Slot, +}; + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub struct CheckpointKey { + pub source: Checkpoint, + pub target_epoch: Epoch, +} + +#[derive(Debug, PartialEq, Eq, Hash)] +pub struct CompactAttestationData { + pub slot: Slot, + pub index: u64, + pub beacon_block_root: Hash256, + pub target_root: Hash256, +} + +#[derive(Debug, PartialEq)] +pub struct CompactIndexedAttestation { + pub attesting_indices: Vec, + pub aggregation_bits: BitList, + pub signature: AggregateSignature, +} + +#[derive(Debug)] +pub struct SplitAttestation { + pub checkpoint: CheckpointKey, + pub data: CompactAttestationData, + pub indexed: CompactIndexedAttestation, +} + +#[derive(Debug, Clone)] +pub struct AttestationRef<'a, T: EthSpec> { + pub checkpoint: &'a CheckpointKey, + pub data: &'a CompactAttestationData, + pub indexed: &'a CompactIndexedAttestation, +} + +#[derive(Debug, Default, PartialEq)] +pub struct AttestationMap { + checkpoint_map: HashMap>, +} + +#[derive(Debug, Default, PartialEq)] +pub struct AttestationDataMap { + attestations: HashMap>>, +} + +impl SplitAttestation { + pub fn new(attestation: Attestation, attesting_indices: Vec) -> Self { + let checkpoint = CheckpointKey { + source: attestation.data.source, + target_epoch: attestation.data.target.epoch, + }; + let data = CompactAttestationData { + slot: attestation.data.slot, + index: attestation.data.index, + beacon_block_root: attestation.data.beacon_block_root, + target_root: 
attestation.data.target.root, + }; + let indexed = CompactIndexedAttestation { + attesting_indices, + aggregation_bits: attestation.aggregation_bits, + signature: attestation.signature, + }; + Self { + checkpoint, + data, + indexed, + } + } + + pub fn as_ref(&self) -> AttestationRef { + AttestationRef { + checkpoint: &self.checkpoint, + data: &self.data, + indexed: &self.indexed, + } + } +} + +impl<'a, T: EthSpec> AttestationRef<'a, T> { + pub fn attestation_data(&self) -> AttestationData { + AttestationData { + slot: self.data.slot, + index: self.data.index, + beacon_block_root: self.data.beacon_block_root, + source: self.checkpoint.source, + target: Checkpoint { + epoch: self.checkpoint.target_epoch, + root: self.data.target_root, + }, + } + } + + pub fn clone_as_attestation(&self) -> Attestation { + Attestation { + aggregation_bits: self.indexed.aggregation_bits.clone(), + data: self.attestation_data(), + signature: self.indexed.signature.clone(), + } + } +} + +impl CheckpointKey { + /// Return two checkpoint keys: `(previous, current)` for the previous and current epochs of + /// the `state`. + pub fn keys_for_state(state: &BeaconState) -> (Self, Self) { + ( + CheckpointKey { + source: state.previous_justified_checkpoint(), + target_epoch: state.previous_epoch(), + }, + CheckpointKey { + source: state.current_justified_checkpoint(), + target_epoch: state.current_epoch(), + }, + ) + } +} + +impl CompactIndexedAttestation { + pub fn signers_disjoint_from(&self, other: &Self) -> bool { + self.aggregation_bits + .intersection(&other.aggregation_bits) + .is_zero() + } + + pub fn aggregate(&mut self, other: &Self) { + self.attesting_indices = self + .attesting_indices + .drain(..) 
+ .merge(other.attesting_indices.iter().copied()) + .dedup() + .collect(); + self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); + self.signature.add_assign_aggregate(&other.signature); + } +} + +impl AttestationMap { + pub fn insert(&mut self, attestation: Attestation, attesting_indices: Vec) { + let SplitAttestation { + checkpoint, + data, + indexed, + } = SplitAttestation::new(attestation, attesting_indices); + + let attestation_map = self + .checkpoint_map + .entry(checkpoint) + .or_insert_with(AttestationDataMap::default); + let attestations = attestation_map + .attestations + .entry(data) + .or_insert_with(Vec::new); + + // Greedily aggregate the attestation with all existing attestations. + // NOTE: this is sub-optimal and in future we will remove this in favour of max-clique + // aggregation. + let mut aggregated = false; + for existing_attestation in attestations.iter_mut() { + if existing_attestation.signers_disjoint_from(&indexed) { + existing_attestation.aggregate(&indexed); + aggregated = true; + } else if *existing_attestation == indexed { + aggregated = true; + } + } + + if !aggregated { + attestations.push(indexed); + } + } + + /// Iterate all attestations matching the given `checkpoint_key`. + pub fn get_attestations<'a>( + &'a self, + checkpoint_key: &'a CheckpointKey, + ) -> impl Iterator> + 'a { + self.checkpoint_map + .get(checkpoint_key) + .into_iter() + .flat_map(|attestation_map| attestation_map.iter(checkpoint_key)) + } + + /// Iterate all attestations in the map. + pub fn iter(&self) -> impl Iterator> { + self.checkpoint_map + .iter() + .flat_map(|(checkpoint_key, attestation_map)| attestation_map.iter(checkpoint_key)) + } + + /// Prune attestations that are from before the previous epoch. + pub fn prune(&mut self, current_epoch: Epoch) { + self.checkpoint_map + .retain(|checkpoint_key, _| current_epoch <= checkpoint_key.target_epoch + 1); + } + + /// Statistics about all attestations stored in the map. 
+ pub fn stats(&self) -> AttestationStats { + self.checkpoint_map + .values() + .map(AttestationDataMap::stats) + .fold(AttestationStats::default(), |mut acc, new| { + acc.num_attestations += new.num_attestations; + acc.num_attestation_data += new.num_attestation_data; + acc.max_aggregates_per_data = + std::cmp::max(acc.max_aggregates_per_data, new.max_aggregates_per_data); + acc + }) + } +} + +impl AttestationDataMap { + pub fn iter<'a>( + &'a self, + checkpoint_key: &'a CheckpointKey, + ) -> impl Iterator> + 'a { + self.attestations.iter().flat_map(|(data, vec_indexed)| { + vec_indexed.iter().map(|indexed| AttestationRef { + checkpoint: checkpoint_key, + data, + indexed, + }) + }) + } + + pub fn stats(&self) -> AttestationStats { + let mut stats = AttestationStats::default(); + + for aggregates in self.attestations.values() { + stats.num_attestations += aggregates.len(); + stats.num_attestation_data += 1; + stats.max_aggregates_per_data = + std::cmp::max(stats.max_aggregates_per_data, aggregates.len()); + } + stats + } +} diff --git a/beacon_node/operation_pool/src/attester_slashing.rs b/beacon_node/operation_pool/src/attester_slashing.rs index 2cb63ad252..f5916384d4 100644 --- a/beacon_node/operation_pool/src/attester_slashing.rs +++ b/beacon_node/operation_pool/src/attester_slashing.rs @@ -39,14 +39,18 @@ impl<'a, T: EthSpec> AttesterSlashingMaxCover<'a, T> { impl<'a, T: EthSpec> MaxCover for AttesterSlashingMaxCover<'a, T> { /// The result type, of which we would eventually like a collection of maximal quality. type Object = AttesterSlashing; + type Intermediate = AttesterSlashing; /// The type used to represent sets. type Set = HashMap; - /// Extract an object for inclusion in a solution. - fn object(&self) -> &AttesterSlashing { + fn intermediate(&self) -> &AttesterSlashing { self.slashing } + fn convert_to_object(slashing: &AttesterSlashing) -> AttesterSlashing { + slashing.clone() + } + /// Get the set of elements covered. 
fn covering_set(&self) -> &HashMap { &self.effective_balances diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index a6bb04d7b9..8c335189c6 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1,5 +1,6 @@ mod attestation; mod attestation_id; +mod attestation_storage; mod attester_slashing; mod max_cover; mod metrics; @@ -8,29 +9,30 @@ mod reward_cache; mod sync_aggregate_id; pub use attestation::AttMaxCover; +pub use attestation_storage::{AttestationRef, SplitAttestation}; pub use max_cover::MaxCover; -pub use persistence::{PersistedOperationPool, PersistedOperationPoolAltair}; +pub use persistence::{ + PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5, +}; pub use reward_cache::RewardCache; +use crate::attestation_storage::{AttestationMap, CheckpointKey}; use crate::sync_aggregate_id::SyncAggregateId; -use attestation_id::AttestationId; use attester_slashing::AttesterSlashingMaxCover; use max_cover::maximum_cover; use parking_lot::{RwLock, RwLockWriteGuard}; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ - get_slashable_indices_modular, verify_attestation_for_block_inclusion, verify_exit, - VerifySignatures, + get_slashable_indices_modular, verify_exit, VerifySignatures, }; -use state_processing::SigVerifiedOp; +use state_processing::{SigVerifiedOp, VerifyOperation}; use std::collections::{hash_map::Entry, HashMap, HashSet}; use std::marker::PhantomData; use std::ptr; use types::{ - sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttesterSlashing, - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, Hash256, - ProposerSlashing, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, - Validator, + sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, Attestation, AttestationData, + 
AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ProposerSlashing, + SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, }; type SyncContributions = RwLock>>>; @@ -38,15 +40,15 @@ type SyncContributions = RwLock { /// Map from attestation ID (see below) to vectors of attestations. - attestations: RwLock>>>, + attestations: RwLock>, /// Map from sync aggregate ID to the best `SyncCommitteeContribution`s seen for that ID. sync_contributions: SyncContributions, /// Set of attester slashings, and the fork version they were verified against. - attester_slashings: RwLock, ForkVersion)>>, + attester_slashings: RwLock, T>>>, /// Map from proposer index to slashing. - proposer_slashings: RwLock>, + proposer_slashings: RwLock>>, /// Map from exiting validator to their exit data. - voluntary_exits: RwLock>, + voluntary_exits: RwLock>>, /// Reward cache for accelerating attestation packing. reward_cache: RwLock, _phantom: PhantomData, @@ -62,9 +64,11 @@ pub enum OpPoolError { RewardCacheGetBlockRoot(BeaconStateError), RewardCacheWrongEpoch, RewardCacheValidatorUnknown(BeaconStateError), + RewardCacheOutOfBounds, IncorrectOpPoolVariant, } +#[derive(Default)] pub struct AttestationStats { /// Total number of attestations for all committeees/indices/votes. pub num_attestations: usize, @@ -185,93 +189,40 @@ impl OperationPool { pub fn insert_attestation( &self, attestation: Attestation, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, + attesting_indices: Vec, ) -> Result<(), AttestationValidationError> { - let id = AttestationId::from_data(&attestation.data, fork, genesis_validators_root, spec); - - // Take a write lock on the attestations map. 
- let mut attestations = self.attestations.write(); - - let existing_attestations = match attestations.entry(id) { - Entry::Vacant(entry) => { - entry.insert(vec![attestation]); - return Ok(()); - } - Entry::Occupied(entry) => entry.into_mut(), - }; - - let mut aggregated = false; - for existing_attestation in existing_attestations.iter_mut() { - if existing_attestation.signers_disjoint_from(&attestation) { - existing_attestation.aggregate(&attestation); - aggregated = true; - } else if *existing_attestation == attestation { - aggregated = true; - } - } - - if !aggregated { - existing_attestations.push(attestation); - } - + self.attestations + .write() + .insert(attestation, attesting_indices); Ok(()) } /// Total number of attestations in the pool, including attestations for the same data. pub fn num_attestations(&self) -> usize { - self.attestations.read().values().map(Vec::len).sum() + self.attestation_stats().num_attestations } pub fn attestation_stats(&self) -> AttestationStats { - let mut num_attestations = 0; - let mut num_attestation_data = 0; - let mut max_aggregates_per_data = 0; - - for aggregates in self.attestations.read().values() { - num_attestations += aggregates.len(); - num_attestation_data += 1; - max_aggregates_per_data = std::cmp::max(max_aggregates_per_data, aggregates.len()); - } - AttestationStats { - num_attestations, - num_attestation_data, - max_aggregates_per_data, - } + self.attestations.read().stats() } /// Return all valid attestations for the given epoch, for use in max cover. 
+ #[allow(clippy::too_many_arguments)] fn get_valid_attestations_for_epoch<'a>( &'a self, - epoch: Epoch, - all_attestations: &'a HashMap>>, + checkpoint_key: &'a CheckpointKey, + all_attestations: &'a AttestationMap, state: &'a BeaconState, reward_cache: &'a RewardCache, total_active_balance: u64, - validity_filter: impl FnMut(&&Attestation) -> bool + Send, + validity_filter: impl FnMut(&AttestationRef<'a, T>) -> bool + Send, spec: &'a ChainSpec, ) -> impl Iterator> + Send { - let domain_bytes = AttestationId::compute_domain_bytes( - epoch, - &state.fork(), - state.genesis_validators_root(), - spec, - ); all_attestations - .iter() - .filter(move |(key, _)| key.domain_bytes_match(&domain_bytes)) - .flat_map(|(_, attestations)| attestations) - .filter(move |attestation| attestation.data.target.epoch == epoch) - .filter(move |attestation| { - // Ensure attestations are valid for block inclusion - verify_attestation_for_block_inclusion( - state, - attestation, - VerifySignatures::False, - spec, - ) - .is_ok() + .get_attestations(checkpoint_key) + .filter(|att| { + att.data.slot + spec.min_attestation_inclusion_delay <= state.slot() + && state.slot() <= att.data.slot + T::slots_per_epoch() }) .filter(validity_filter) .filter_map(move |att| { @@ -288,13 +239,12 @@ impl OperationPool { pub fn get_attestations( &self, state: &BeaconState, - prev_epoch_validity_filter: impl FnMut(&&Attestation) -> bool + Send, - curr_epoch_validity_filter: impl FnMut(&&Attestation) -> bool + Send, + prev_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send, + curr_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send, spec: &ChainSpec, ) -> Result>, OpPoolError> { // Attestations for the current fork, which may be from the current or previous epoch. 
- let prev_epoch = state.previous_epoch(); - let current_epoch = state.current_epoch(); + let (prev_epoch_key, curr_epoch_key) = CheckpointKey::keys_for_state(state); let all_attestations = self.attestations.read(); let total_active_balance = state .get_total_active_balance() @@ -314,10 +264,10 @@ impl OperationPool { let prev_epoch_att = self .get_valid_attestations_for_epoch( - prev_epoch, + &prev_epoch_key, &*all_attestations, state, - &reward_cache, + &*reward_cache, total_active_balance, prev_epoch_validity_filter, spec, @@ -325,10 +275,10 @@ impl OperationPool { .inspect(|_| num_prev_valid += 1); let curr_epoch_att = self .get_valid_attestations_for_epoch( - current_epoch, + &curr_epoch_key, &*all_attestations, state, - &reward_cache, + &*reward_cache, total_active_balance, curr_epoch_validity_filter, spec, @@ -349,7 +299,7 @@ impl OperationPool { move || { let _timer = metrics::start_timer(&metrics::ATTESTATION_PREV_EPOCH_PACKING_TIME); // If we're in the genesis epoch, just use the current epoch attestations. - if prev_epoch == current_epoch { + if prev_epoch_key == curr_epoch_key { vec![] } else { maximum_cover(prev_epoch_att, prev_epoch_limit, "prev_epoch_attestations") @@ -377,36 +327,26 @@ impl OperationPool { /// Remove attestations which are too old to be included in a block. pub fn prune_attestations(&self, current_epoch: Epoch) { - // Prune attestations that are from before the previous epoch. - self.attestations.write().retain(|_, attestations| { - // All the attestations in this bucket have the same data, so we only need to - // check the first one. - attestations - .first() - .map_or(false, |att| current_epoch <= att.data.target.epoch + 1) - }); + self.attestations.write().prune(current_epoch); } /// Insert a proposer slashing into the pool. 
pub fn insert_proposer_slashing( &self, - verified_proposer_slashing: SigVerifiedOp, + verified_proposer_slashing: SigVerifiedOp, ) { - let slashing = verified_proposer_slashing.into_inner(); - self.proposer_slashings - .write() - .insert(slashing.signed_header_1.message.proposer_index, slashing); + self.proposer_slashings.write().insert( + verified_proposer_slashing.as_inner().proposer_index(), + verified_proposer_slashing, + ); } /// Insert an attester slashing into the pool. pub fn insert_attester_slashing( &self, - verified_slashing: SigVerifiedOp>, - fork: Fork, + verified_slashing: SigVerifiedOp, T>, ) { - self.attester_slashings - .write() - .insert((verified_slashing.into_inner(), fork.current_version)); + self.attester_slashings.write().insert(verified_slashing); } /// Get proposer and attester slashings for inclusion in a block. @@ -426,11 +366,13 @@ impl OperationPool { let proposer_slashings = filter_limit_operations( self.proposer_slashings.read().values(), |slashing| { - state - .validators() - .get(slashing.signed_header_1.message.proposer_index as usize) - .map_or(false, |validator| !validator.slashed) + slashing.signature_is_still_valid(&state.fork()) + && state + .validators() + .get(slashing.as_inner().signed_header_1.message.proposer_index as usize) + .map_or(false, |validator| !validator.slashed) }, + |slashing| slashing.as_inner().clone(), T::MaxProposerSlashings::to_usize(), ); @@ -438,30 +380,10 @@ impl OperationPool { // slashings. 
let mut to_be_slashed = proposer_slashings .iter() - .map(|s| s.signed_header_1.message.proposer_index) - .collect::>(); + .map(|s| s.proposer_index()) + .collect(); - let reader = self.attester_slashings.read(); - - let relevant_attester_slashings = reader.iter().flat_map(|(slashing, fork)| { - if *fork == state.fork().previous_version || *fork == state.fork().current_version { - AttesterSlashingMaxCover::new(slashing, &to_be_slashed, state) - } else { - None - } - }); - - let attester_slashings = maximum_cover( - relevant_attester_slashings, - T::MaxAttesterSlashings::to_usize(), - "attester_slashings", - ) - .into_iter() - .map(|cover| { - to_be_slashed.extend(cover.covering_set().keys()); - cover.object().clone() - }) - .collect(); + let attester_slashings = self.get_attester_slashings(state, &mut to_be_slashed); let voluntary_exits = self.get_voluntary_exits( state, @@ -472,6 +394,37 @@ impl OperationPool { (proposer_slashings, attester_slashings, voluntary_exits) } + /// Get attester slashings taking into account already slashed validators. + /// + /// This function *must* remain private. + fn get_attester_slashings( + &self, + state: &BeaconState, + to_be_slashed: &mut HashSet, + ) -> Vec> { + let reader = self.attester_slashings.read(); + + let relevant_attester_slashings = reader.iter().flat_map(|slashing| { + if slashing.signature_is_still_valid(&state.fork()) { + AttesterSlashingMaxCover::new(slashing.as_inner(), to_be_slashed, state) + } else { + None + } + }); + + maximum_cover( + relevant_attester_slashings, + T::MaxAttesterSlashings::to_usize(), + "attester_slashings", + ) + .into_iter() + .map(|cover| { + to_be_slashed.extend(cover.covering_set().keys()); + cover.intermediate().clone() + }) + .collect() + } + /// Prune proposer slashings for validators which are exited in the finalized epoch. 
pub fn prune_proposer_slashings(&self, head_state: &BeaconState) { prune_validator_hash_map( @@ -484,30 +437,23 @@ impl OperationPool { /// Prune attester slashings for all slashed or withdrawn validators, or attestations on another /// fork. pub fn prune_attester_slashings(&self, head_state: &BeaconState) { - self.attester_slashings - .write() - .retain(|(slashing, fork_version)| { - let previous_fork_is_finalized = - head_state.finalized_checkpoint().epoch >= head_state.fork().epoch; - // Prune any slashings which don't match the current fork version, or the previous - // fork version if it is not finalized yet. - let fork_ok = (*fork_version == head_state.fork().current_version) - || (*fork_version == head_state.fork().previous_version - && !previous_fork_is_finalized); - // Slashings that don't slash any validators can also be dropped. - let slashing_ok = - get_slashable_indices_modular(head_state, slashing, |_, validator| { - // Declare that a validator is still slashable if they have not exited prior - // to the finalized epoch. - // - // We cannot check the `slashed` field since the `head` is not finalized and - // a fork could un-slash someone. - validator.exit_epoch > head_state.finalized_checkpoint().epoch - }) - .map_or(false, |indices| !indices.is_empty()); + self.attester_slashings.write().retain(|slashing| { + // Check that the attestation's signature is still valid wrt the fork version. + let signature_ok = slashing.signature_is_still_valid(&head_state.fork()); + // Slashings that don't slash any validators can also be dropped. + let slashing_ok = + get_slashable_indices_modular(head_state, slashing.as_inner(), |_, validator| { + // Declare that a validator is still slashable if they have not exited prior + // to the finalized epoch. + // + // We cannot check the `slashed` field since the `head` is not finalized and + // a fork could un-slash someone. 
+ validator.exit_epoch > head_state.finalized_checkpoint().epoch + }) + .map_or(false, |indices| !indices.is_empty()); - fork_ok && slashing_ok - }); + signature_ok && slashing_ok + }); } /// Total number of attester slashings in the pool. @@ -521,11 +467,10 @@ impl OperationPool { } /// Insert a voluntary exit that has previously been checked elsewhere. - pub fn insert_voluntary_exit(&self, verified_exit: SigVerifiedOp) { - let exit = verified_exit.into_inner(); + pub fn insert_voluntary_exit(&self, exit: SigVerifiedOp) { self.voluntary_exits .write() - .insert(exit.message.validator_index, exit); + .insert(exit.as_inner().message.validator_index, exit); } /// Get a list of voluntary exits for inclusion in a block. @@ -540,7 +485,12 @@ impl OperationPool { { filter_limit_operations( self.voluntary_exits.read().values(), - |exit| filter(exit) && verify_exit(state, exit, VerifySignatures::False, spec).is_ok(), + |exit| { + filter(exit.as_inner()) + && exit.signature_is_still_valid(&state.fork()) + && verify_exit(state, exit.as_inner(), VerifySignatures::False, spec).is_ok() + }, + |exit| exit.as_inner().clone(), T::MaxVoluntaryExits::to_usize(), ) } @@ -579,8 +529,8 @@ impl OperationPool { pub fn get_all_attestations(&self) -> Vec> { self.attestations .read() - .values() - .flat_map(|attns| attns.iter().cloned()) + .iter() + .map(|att| att.clone_as_attestation()) .collect() } @@ -589,14 +539,13 @@ impl OperationPool { /// This method may return objects that are invalid for block inclusion. 
pub fn get_filtered_attestations(&self, filter: F) -> Vec> where - F: Fn(&Attestation) -> bool, + F: Fn(&AttestationData) -> bool, { self.attestations .read() - .values() - .flat_map(|attns| attns.iter()) - .filter(|attn| filter(*attn)) - .cloned() + .iter() + .filter(|att| filter(&att.attestation_data())) + .map(|att| att.clone_as_attestation()) .collect() } @@ -607,7 +556,7 @@ impl OperationPool { self.attester_slashings .read() .iter() - .map(|(slashing, _)| slashing.clone()) + .map(|slashing| slashing.as_inner().clone()) .collect() } @@ -618,7 +567,7 @@ impl OperationPool { self.proposer_slashings .read() .iter() - .map(|(_, slashing)| slashing.clone()) + .map(|(_, slashing)| slashing.as_inner().clone()) .collect() } @@ -629,23 +578,29 @@ impl OperationPool { self.voluntary_exits .read() .iter() - .map(|(_, exit)| exit.clone()) + .map(|(_, exit)| exit.as_inner().clone()) .collect() } } /// Filter up to a maximum number of operations out of an iterator. -fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: usize) -> Vec +fn filter_limit_operations<'a, T: 'a, V: 'a, I, F, G>( + operations: I, + filter: F, + mapping: G, + limit: usize, +) -> Vec where I: IntoIterator, F: Fn(&T) -> bool, + G: Fn(&T) -> V, T: Clone, { operations .into_iter() .filter(|x| filter(*x)) .take(limit) - .cloned() + .map(mapping) .collect() } @@ -655,17 +610,19 @@ where /// in the state's validator registry and then passed to `prune_if`. /// Entries for unknown validators will be kept. 
fn prune_validator_hash_map( - map: &mut HashMap, + map: &mut HashMap>, prune_if: F, head_state: &BeaconState, ) where F: Fn(&Validator) -> bool, + T: VerifyOperation, { - map.retain(|&validator_index, _| { - head_state - .validators() - .get(validator_index as usize) - .map_or(true, |validator| !prune_if(validator)) + map.retain(|&validator_index, op| { + op.signature_is_still_valid(&head_state.fork()) + && head_state + .validators() + .get(validator_index as usize) + .map_or(true, |validator| !prune_if(validator)) }); } @@ -676,6 +633,7 @@ impl PartialEq for OperationPool { return true; } *self.attestations.read() == *other.attestations.read() + && *self.sync_contributions.read() == *other.sync_contributions.read() && *self.attester_slashings.read() == *other.attester_slashings.read() && *self.proposer_slashings.read() == *other.proposer_slashings.read() && *self.voluntary_exits.read() == *other.voluntary_exits.read() @@ -690,7 +648,8 @@ mod release_tests { test_spec, BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, }; use lazy_static::lazy_static; - use state_processing::VerifyOperation; + use maplit::hashset; + use state_processing::{common::get_attesting_indices_from_state, VerifyOperation}; use std::collections::BTreeSet; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::*; @@ -710,6 +669,7 @@ mod release_tests { .spec_or_default(spec) .keypairs(KEYPAIRS[0..validator_count].to_vec()) .fresh_ephemeral_store() + .mock_execution_layer() .build(); harness.advance_slot(); @@ -731,11 +691,10 @@ mod release_tests { } /// Test state for sync contribution-related tests. 
- fn sync_contribution_test_state( + async fn sync_contribution_test_state( num_committees: usize, ) -> (BeaconChainHarness>, ChainSpec) { let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); let num_validators = @@ -743,12 +702,14 @@ mod release_tests { let harness = get_harness::(num_validators, Some(spec.clone())); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - &[Slot::new(1)], - (0..num_validators).collect::>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + &[Slot::new(1)], + (0..num_validators).collect::>().as_slice(), + ) + .await; (harness, spec) } @@ -799,10 +760,19 @@ mod release_tests { }) .unwrap(); + let att1_indices = get_attesting_indices_from_state(&state, &att1).unwrap(); + let att2_indices = get_attesting_indices_from_state(&state, &att2).unwrap(); + let att1_split = SplitAttestation::new(att1.clone(), att1_indices); + let att2_split = SplitAttestation::new(att2.clone(), att2_indices); + assert_eq!( att1.aggregation_bits.num_set_bits(), - earliest_attestation_validators(&att1, &state, state.as_base().unwrap()) - .num_set_bits() + earliest_attestation_validators( + &att1_split.as_ref(), + &state, + state.as_base().unwrap() + ) + .num_set_bits() ); state @@ -819,8 +789,12 @@ mod release_tests { assert_eq!( committees.get(0).unwrap().committee.len() - 2, - earliest_attestation_validators(&att2, &state, state.as_base().unwrap()) - .num_set_bits() + earliest_attestation_validators( + &att2_split.as_ref(), + &state, + state.as_base().unwrap() + ) + .num_set_bits() ); } } @@ -859,14 +833,12 @@ mod release_tests { ); for (atts, _) in attestations { - for att in atts.into_iter() { - op_pool - .insert_attestation(att.0, &state.fork(), state.genesis_validators_root(), spec) - .unwrap(); + for (att, _) in atts { + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + op_pool.insert_attestation(att, 
attesting_indices).unwrap(); } } - assert_eq!(op_pool.attestations.read().len(), committees.len()); assert_eq!(op_pool.num_attestations(), committees.len()); // Before the min attestation inclusion delay, get_attestations shouldn't return anything. @@ -932,17 +904,11 @@ mod release_tests { for (_, aggregate) in attestations { let att = aggregate.unwrap().message.aggregate; + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); op_pool - .insert_attestation( - att.clone(), - &state.fork(), - state.genesis_validators_root(), - spec, - ) - .unwrap(); - op_pool - .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) + .insert_attestation(att.clone(), attesting_indices.clone()) .unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); } assert_eq!(op_pool.num_attestations(), committees.len()); @@ -1026,16 +992,17 @@ mod release_tests { .collect::>(); for att in aggs1.into_iter().chain(aggs2.into_iter()) { - op_pool - .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) - .unwrap(); + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); } } // The attestations should get aggregated into two attestations that comprise all // validators. 
- assert_eq!(op_pool.attestations.read().len(), committees.len()); - assert_eq!(op_pool.num_attestations(), 2 * committees.len()); + let stats = op_pool.attestation_stats(); + assert_eq!(stats.num_attestation_data, committees.len()); + assert_eq!(stats.num_attestations, 2 * committees.len()); + assert_eq!(stats.max_aggregates_per_data, 2); } /// Create a bunch of attestations signed by a small number of validators, and another @@ -1097,9 +1064,8 @@ mod release_tests { .collect::>(); for att in aggs { - op_pool - .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) - .unwrap(); + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); } }; @@ -1114,12 +1080,13 @@ mod release_tests { let num_small = target_committee_size / small_step_size; let num_big = target_committee_size / big_step_size; - assert_eq!(op_pool.attestations.read().len(), committees.len()); + let stats = op_pool.attestation_stats(); + assert_eq!(stats.num_attestation_data, committees.len()); assert_eq!( - op_pool.num_attestations(), + stats.num_attestations, (num_small + num_big) * committees.len() ); - assert!(op_pool.num_attestations() > max_attestations); + assert!(stats.num_attestations > max_attestations); *state.slot_mut() += spec.min_attestation_inclusion_delay; let best_attestations = op_pool @@ -1192,9 +1159,8 @@ mod release_tests { .collect::>(); for att in aggs { - op_pool - .insert_attestation(att, &state.fork(), state.genesis_validators_root(), spec) - .unwrap(); + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); } }; @@ -1209,7 +1175,10 @@ mod release_tests { let num_small = target_committee_size / small_step_size; let num_big = target_committee_size / big_step_size; - assert_eq!(op_pool.attestations.read().len(), committees.len()); + assert_eq!( + 
op_pool.attestation_stats().num_attestation_data, + committees.len() + ); assert_eq!( op_pool.num_attestations(), (num_small + num_big) * committees.len() @@ -1229,11 +1198,21 @@ mod release_tests { // Used for asserting that rewards are in decreasing order. let mut prev_reward = u64::max_value(); - for att in &best_attestations { - let mut fresh_validators_rewards = - AttMaxCover::new(att, &state, total_active_balance, spec) - .unwrap() - .fresh_validators_rewards; + let mut reward_cache = RewardCache::default(); + reward_cache.update(&state).unwrap(); + + for att in best_attestations { + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + let split_attestation = SplitAttestation::new(att, attesting_indices); + let mut fresh_validators_rewards = AttMaxCover::new( + split_attestation.as_ref(), + &state, + &reward_cache, + total_active_balance, + spec, + ) + .unwrap() + .fresh_validators_rewards; // Remove validators covered by previous attestations. fresh_validators_rewards @@ -1300,10 +1279,7 @@ mod release_tests { let op_pool = OperationPool::::new(); let slashing = harness.make_attester_slashing(vec![1, 3, 5, 7, 9]); - op_pool.insert_attester_slashing( - slashing.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing.clone().validate(&state, spec).unwrap()); op_pool.prune_attester_slashings(&state); assert_eq!( op_pool.get_slashings_and_exits(&state, &harness.spec).1, @@ -1324,22 +1300,10 @@ mod release_tests { let slashing_3 = harness.make_attester_slashing(vec![4, 5, 6]); let slashing_4 = harness.make_attester_slashing(vec![7, 8, 9, 10]); - op_pool.insert_attester_slashing( - slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - 
op_pool.insert_attester_slashing( - slashing_4.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_3.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_4.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![slashing_4, slashing_3]); @@ -1358,22 +1322,10 @@ mod release_tests { let slashing_3 = harness.make_attester_slashing(vec![5, 6]); let slashing_4 = harness.make_attester_slashing(vec![6]); - op_pool.insert_attester_slashing( - slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_4.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_3.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_4.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![slashing_1, slashing_3]); @@ -1393,18 +1345,9 @@ mod release_tests { let a_slashing_3 = harness.make_attester_slashing(vec![5, 6]); op_pool.insert_proposer_slashing(p_slashing.clone().validate(&state, spec).unwrap()); - op_pool.insert_attester_slashing( - a_slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), - ); 
- op_pool.insert_attester_slashing( - a_slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - a_slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(a_slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(a_slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(a_slashing_3.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![a_slashing_1, a_slashing_3]); @@ -1425,18 +1368,9 @@ mod release_tests { let slashing_2 = harness.make_attester_slashing(vec![5, 6]); let slashing_3 = harness.make_attester_slashing(vec![1, 2, 3]); - op_pool.insert_attester_slashing( - slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_3.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![slashing_1, slashing_3]); @@ -1457,27 +1391,18 @@ mod release_tests { let slashing_2 = harness.make_attester_slashing(vec![4, 5, 6]); let slashing_3 = harness.make_attester_slashing(vec![7, 8]); - op_pool.insert_attester_slashing( - slashing_1.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - slashing_2.clone().validate(&state, spec).unwrap(), - state.fork(), - ); - op_pool.insert_attester_slashing( - 
slashing_3.clone().validate(&state, spec).unwrap(), - state.fork(), - ); + op_pool.insert_attester_slashing(slashing_1.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_2.clone().validate(&state, spec).unwrap()); + op_pool.insert_attester_slashing(slashing_3.clone().validate(&state, spec).unwrap()); let best_slashings = op_pool.get_slashings_and_exits(&state, &harness.spec); assert_eq!(best_slashings.1, vec![slashing_2, slashing_3]); } /// End-to-end test of basic sync contribution handling. - #[test] - fn sync_contribution_aggregation_insert_get_prune() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_aggregation_insert_get_prune() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1535,9 +1460,9 @@ mod release_tests { } /// Adding a sync contribution already in the pool should not increase the size of the pool. - #[test] - fn sync_contribution_duplicate() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_duplicate() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1572,9 +1497,9 @@ mod release_tests { /// Adding a sync contribution already in the pool with more bits set should increase the /// number of bits set in the aggregate. - #[test] - fn sync_contribution_with_more_bits() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_with_more_bits() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1652,9 +1577,9 @@ mod release_tests { /// Adding a sync contribution already in the pool with fewer bits set should not increase the /// number of bits set in the aggregate. 
- #[test] - fn sync_contribution_with_fewer_bits() { - let (harness, _) = sync_contribution_test_state::(1); + #[tokio::test] + async fn sync_contribution_with_fewer_bits() { + let (harness, _) = sync_contribution_test_state::(1).await; let op_pool = OperationPool::::new(); let state = harness.get_current_state(); @@ -1737,4 +1662,289 @@ mod release_tests { expected_bits ); } + + fn cross_fork_harness() -> (BeaconChainHarness>, ChainSpec) + { + let mut spec = test_spec::(); + + // Give some room to sign surround slashings. + spec.altair_fork_epoch = Some(Epoch::new(3)); + spec.bellatrix_fork_epoch = Some(Epoch::new(6)); + + // To make exits immediately valid. + spec.shard_committee_period = 0; + + let num_validators = 32; + + let harness = get_harness::(num_validators, Some(spec.clone())); + (harness, spec) + } + + /// Test several cross-fork voluntary exits: + /// + /// - phase0 exit (not valid after Bellatrix) + /// - phase0 exit signed with Altair fork version (only valid after Bellatrix) + #[tokio::test] + async fn cross_fork_exits() { + let (harness, spec) = cross_fork_harness::(); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); + let bellatrix_fork_epoch = spec.bellatrix_fork_epoch.unwrap(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + + let op_pool = OperationPool::::new(); + + // Sign an exit in phase0 with a phase0 epoch. + let exit1 = harness.make_voluntary_exit(0, Epoch::new(0)); + + // Advance to Altair. + harness + .extend_to_slot(altair_fork_epoch.start_slot(slots_per_epoch)) + .await; + let altair_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!(altair_head.beacon_state.current_epoch(), altair_fork_epoch); + + // Add exit 1 to the op pool during Altair. It's still valid at this point and should be + // returned. 
+ let verified_exit1 = exit1 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_voluntary_exit(verified_exit1); + let exits = + op_pool.get_voluntary_exits(&altair_head.beacon_state, |_| true, &harness.chain.spec); + assert!(exits.contains(&exit1)); + assert_eq!(exits.len(), 1); + + // Advance to Bellatrix. + harness + .extend_to_slot(bellatrix_fork_epoch.start_slot(slots_per_epoch)) + .await; + let bellatrix_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!( + bellatrix_head.beacon_state.current_epoch(), + bellatrix_fork_epoch + ); + + // Sign an exit with the Altair domain and a phase0 epoch. This is a weird type of exit + // that is valid because after the Bellatrix fork we'll use the Altair fork domain to verify + // all prior epochs. + let exit2 = harness.make_voluntary_exit(2, Epoch::new(0)); + let verified_exit2 = exit2 + .clone() + .validate(&bellatrix_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_voluntary_exit(verified_exit2); + + // Attempting to fetch exit1 now should fail, despite it still being in the pool. + // exit2 should still be valid, because it was signed with the Altair fork domain. 
+ assert_eq!(op_pool.voluntary_exits.read().len(), 2); + let exits = + op_pool.get_voluntary_exits(&bellatrix_head.beacon_state, |_| true, &harness.spec); + assert_eq!(&exits, &[exit2]); + } + + /// Test several cross-fork proposer slashings: + /// + /// - phase0 slashing (not valid after Bellatrix) + /// - Bellatrix slashing signed with Altair fork version (not valid after Bellatrix) + /// - phase0 slashing signed with Altair fork version (only valid after Bellatrix) + #[tokio::test] + async fn cross_fork_proposer_slashings() { + let (harness, spec) = cross_fork_harness::(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); + let bellatrix_fork_epoch = spec.bellatrix_fork_epoch.unwrap(); + let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(slots_per_epoch); + + let op_pool = OperationPool::::new(); + + // Sign a proposer slashing in phase0 with a phase0 epoch. + let slashing1 = harness.make_proposer_slashing_at_slot(0, Some(Slot::new(1))); + + // Advance to Altair. + harness + .extend_to_slot(altair_fork_epoch.start_slot(slots_per_epoch)) + .await; + let altair_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!(altair_head.beacon_state.current_epoch(), altair_fork_epoch); + + // Add slashing1 to the op pool during Altair. It's still valid at this point and should be + // returned. + let verified_slashing1 = slashing1 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_proposer_slashing(verified_slashing1); + let (proposer_slashings, _, _) = + op_pool.get_slashings_and_exits(&altair_head.beacon_state, &harness.chain.spec); + assert!(proposer_slashings.contains(&slashing1)); + assert_eq!(proposer_slashings.len(), 1); + + // Sign a proposer slashing with a Bellatrix slot using the Altair fork domain. + // + // This slashing is valid only before the Bellatrix fork epoch. 
+ let slashing2 = harness.make_proposer_slashing_at_slot(1, Some(bellatrix_fork_slot)); + let verified_slashing2 = slashing2 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_proposer_slashing(verified_slashing2); + let (proposer_slashings, _, _) = + op_pool.get_slashings_and_exits(&altair_head.beacon_state, &harness.chain.spec); + assert!(proposer_slashings.contains(&slashing1)); + assert!(proposer_slashings.contains(&slashing2)); + assert_eq!(proposer_slashings.len(), 2); + + // Advance to Bellatrix. + harness.extend_to_slot(bellatrix_fork_slot).await; + let bellatrix_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!( + bellatrix_head.beacon_state.current_epoch(), + bellatrix_fork_epoch + ); + + // Sign a proposer slashing with the Altair domain and a phase0 slot. This is a weird type + // of slashing that is only valid after the Bellatrix fork because we'll use the Altair fork + // domain to verify all prior epochs. + let slashing3 = harness.make_proposer_slashing_at_slot(2, Some(Slot::new(1))); + let verified_slashing3 = slashing3 + .clone() + .validate(&bellatrix_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_proposer_slashing(verified_slashing3); + + // Attempting to fetch slashing1 now should fail, despite it still being in the pool. + // Likewise slashing2 is also invalid now because it should be signed with the + // Bellatrix fork version. + // slashing3 should still be valid, because it was signed with the Altair fork domain. 
+ assert_eq!(op_pool.proposer_slashings.read().len(), 3); + let (proposer_slashings, _, _) = + op_pool.get_slashings_and_exits(&bellatrix_head.beacon_state, &harness.spec); + assert!(proposer_slashings.contains(&slashing3)); + assert_eq!(proposer_slashings.len(), 1); + } + + /// Test several cross-fork attester slashings: + /// + /// - both target epochs in phase0 (not valid after Bellatrix) + /// - both target epochs in Bellatrix but signed with Altair domain (not valid after Bellatrix) + /// - Altair attestation that surrounds a phase0 attestation (not valid after Bellatrix) + /// - both target epochs in phase0 but signed with Altair domain (only valid after Bellatrix) + #[tokio::test] + async fn cross_fork_attester_slashings() { + let (harness, spec) = cross_fork_harness::(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + let zero_epoch = Epoch::new(0); + let altair_fork_epoch = spec.altair_fork_epoch.unwrap(); + let bellatrix_fork_epoch = spec.bellatrix_fork_epoch.unwrap(); + let bellatrix_fork_slot = bellatrix_fork_epoch.start_slot(slots_per_epoch); + + let op_pool = OperationPool::::new(); + + // Sign an attester slashing with the phase0 fork version, with both target epochs in phase0. + let slashing1 = harness.make_attester_slashing_with_epochs( + vec![0], + None, + Some(zero_epoch), + None, + Some(zero_epoch), + ); + + // Advance to Altair. + harness + .extend_to_slot(altair_fork_epoch.start_slot(slots_per_epoch)) + .await; + let altair_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!(altair_head.beacon_state.current_epoch(), altair_fork_epoch); + + // Add slashing1 to the op pool during Altair. It's still valid at this point and should be + // returned. + let verified_slashing1 = slashing1 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_attester_slashing(verified_slashing1); + + // Sign an attester slashing with two Bellatrix epochs using the Altair fork domain. 
+ // + // This slashing is valid only before the Bellatrix fork epoch. + let slashing2 = harness.make_attester_slashing_with_epochs( + vec![1], + None, + Some(bellatrix_fork_epoch), + None, + Some(bellatrix_fork_epoch), + ); + let verified_slashing2 = slashing2 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_attester_slashing(verified_slashing2); + let (_, attester_slashings, _) = + op_pool.get_slashings_and_exits(&altair_head.beacon_state, &harness.chain.spec); + assert!(attester_slashings.contains(&slashing1)); + assert!(attester_slashings.contains(&slashing2)); + assert_eq!(attester_slashings.len(), 2); + + // Sign an attester slashing where an Altair attestation surrounds a phase0 one. + // + // This slashing is valid only before the Bellatrix fork epoch. + let slashing3 = harness.make_attester_slashing_with_epochs( + vec![2], + Some(Epoch::new(0)), + Some(altair_fork_epoch), + Some(Epoch::new(1)), + Some(altair_fork_epoch - 1), + ); + let verified_slashing3 = slashing3 + .clone() + .validate(&altair_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_attester_slashing(verified_slashing3); + + // All three slashings should be valid and returned from the pool at this point. + // Seeing as we can only extract 2 at time we'll just pretend that validator 0 is already + // slashed. + let mut to_be_slashed = hashset! {0}; + let attester_slashings = + op_pool.get_attester_slashings(&altair_head.beacon_state, &mut to_be_slashed); + assert!(attester_slashings.contains(&slashing2)); + assert!(attester_slashings.contains(&slashing3)); + assert_eq!(attester_slashings.len(), 2); + + // Advance to Bellatrix. + harness.extend_to_slot(bellatrix_fork_slot).await; + let bellatrix_head = harness.chain.canonical_head.cached_head().snapshot; + assert_eq!( + bellatrix_head.beacon_state.current_epoch(), + bellatrix_fork_epoch + ); + + // Sign an attester slashing with the Altair domain and phase0 epochs. 
This is a weird type + // of slashing that is only valid after the Bellatrix fork because we'll use the Altair fork + // domain to verify all prior epochs. + let slashing4 = harness.make_attester_slashing_with_epochs( + vec![3], + Some(Epoch::new(0)), + Some(altair_fork_epoch - 1), + Some(Epoch::new(0)), + Some(altair_fork_epoch - 1), + ); + let verified_slashing4 = slashing4 + .clone() + .validate(&bellatrix_head.beacon_state, &harness.chain.spec) + .unwrap(); + op_pool.insert_attester_slashing(verified_slashing4); + + // All slashings except slashing4 are now invalid (despite being present in the pool). + assert_eq!(op_pool.attester_slashings.read().len(), 4); + let (_, attester_slashings, _) = + op_pool.get_slashings_and_exits(&bellatrix_head.beacon_state, &harness.spec); + assert!(attester_slashings.contains(&slashing4)); + assert_eq!(attester_slashings.len(), 1); + + // Pruning the attester slashings should remove all but slashing4. + op_pool.prune_attester_slashings(&bellatrix_head.beacon_state); + assert_eq!(op_pool.attester_slashings.read().len(), 1); + } } diff --git a/beacon_node/operation_pool/src/max_cover.rs b/beacon_node/operation_pool/src/max_cover.rs index 8e50b8152e..2e629f786b 100644 --- a/beacon_node/operation_pool/src/max_cover.rs +++ b/beacon_node/operation_pool/src/max_cover.rs @@ -11,16 +11,21 @@ use itertools::Itertools; pub trait MaxCover: Clone { /// The result type, of which we would eventually like a collection of maximal quality. type Object: Clone; + /// The intermediate object type, which can be converted to `Object`. + type Intermediate: Clone; /// The type used to represent sets. type Set: Clone; - /// Extract an object for inclusion in a solution. - fn object(&self) -> &Self::Object; + /// Extract the intermediate object. + fn intermediate(&self) -> &Self::Intermediate; + + /// Convert the borrowed intermediate object to an owned object for the solution. 
+ fn convert_to_object(intermediate: &Self::Intermediate) -> Self::Object; /// Get the set of elements covered. fn covering_set(&self) -> &Self::Set; /// Update the set of items covered, for the inclusion of some object in the solution. - fn update_covering_set(&mut self, max_obj: &Self::Object, max_set: &Self::Set); + fn update_covering_set(&mut self, max_obj: &Self::Intermediate, max_set: &Self::Set); /// The quality of this item's covering set, usually its cardinality. fn score(&self) -> usize; } @@ -86,7 +91,7 @@ where .filter(|x| x.available && x.item.score() != 0) .for_each(|x| { x.item - .update_covering_set(best.object(), best.covering_set()) + .update_covering_set(best.intermediate(), best.covering_set()) }); result.push(best); @@ -106,7 +111,7 @@ where .into_iter() .merge_by(cover2, |item1, item2| item1.score() >= item2.score()) .take(limit) - .map(|item| item.object().clone()) + .map(|item| T::convert_to_object(item.intermediate())) .collect() } @@ -121,12 +126,17 @@ mod test { T: Clone + Eq + Hash, { type Object = Self; + type Intermediate = Self; type Set = Self; - fn object(&self) -> &Self { + fn intermediate(&self) -> &Self { self } + fn convert_to_object(set: &Self) -> Self { + set.clone() + } + fn covering_set(&self) -> &Self { self } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 84178d1309..1a612f9119 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -1,12 +1,13 @@ use crate::attestation_id::AttestationId; +use crate::attestation_storage::AttestationMap; use crate::sync_aggregate_id::SyncAggregateId; use crate::OpPoolError; use crate::OperationPool; use derivative::Derivative; use parking_lot::RwLock; -use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use state_processing::SigVerifiedOp; use store::{DBColumn, Error as StoreError, StoreItem}; use types::*; @@ 
-17,32 +18,42 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec { - /// Mapping from attestation ID to attestation mappings. - // We could save space by not storing the attestation ID, but it might - // be difficult to make that roundtrip due to eager aggregation. - attestations: Vec<(AttestationId, Vec>)>, + /// [DEPRECATED] Mapping from attestation ID to attestation mappings. + #[superstruct(only(V5))] + pub attestations_v5: Vec<(AttestationId, Vec>)>, + /// Attestations and their attesting indices. + #[superstruct(only(V12))] + pub attestations: Vec<(Attestation, Vec)>, /// Mapping from sync contribution ID to sync contributions and aggregate. - #[superstruct(only(Altair))] - sync_contributions: PersistedSyncContributions, + pub sync_contributions: PersistedSyncContributions, + /// [DEPRECATED] Attester slashings. + #[superstruct(only(V5))] + pub attester_slashings_v5: Vec<(AttesterSlashing, ForkVersion)>, /// Attester slashings. - attester_slashings: Vec<(AttesterSlashing, ForkVersion)>, - /// Proposer slashings. - proposer_slashings: Vec, - /// Voluntary exits. - voluntary_exits: Vec, + #[superstruct(only(V12))] + pub attester_slashings: Vec, T>>, + /// [DEPRECATED] Proposer slashings. + #[superstruct(only(V5))] + pub proposer_slashings_v5: Vec, + /// Proposer slashings with fork information. + #[superstruct(only(V12))] + pub proposer_slashings: Vec>, + /// [DEPRECATED] Voluntary exits. + #[superstruct(only(V5))] + pub voluntary_exits_v5: Vec, + /// Voluntary exits with fork information. 
+ #[superstruct(only(V12))] + pub voluntary_exits: Vec>, } impl PersistedOperationPool { @@ -52,7 +63,12 @@ impl PersistedOperationPool { .attestations .read() .iter() - .map(|(att_id, att)| (att_id.clone(), att.clone())) + .map(|att| { + ( + att.clone_as_attestation(), + att.indexed.attesting_indices.clone(), + ) + }) .collect(); let sync_contributions = operation_pool @@ -83,7 +99,7 @@ impl PersistedOperationPool { .map(|(_, exit)| exit.clone()) .collect(); - PersistedOperationPool::Altair(PersistedOperationPoolAltair { + PersistedOperationPool::V12(PersistedOperationPoolV12 { attestations, sync_contributions, attester_slashings, @@ -92,46 +108,62 @@ impl PersistedOperationPool { }) } - /// Reconstruct an `OperationPool`. Sets `sync_contributions` to its `Default` if `self` matches - /// `PersistedOperationPool::Base`. + /// Reconstruct an `OperationPool`. pub fn into_operation_pool(self) -> Result, OpPoolError> { - let attestations = RwLock::new(self.attestations().iter().cloned().collect()); - let attester_slashings = RwLock::new(self.attester_slashings().iter().cloned().collect()); + let attester_slashings = RwLock::new(self.attester_slashings()?.iter().cloned().collect()); let proposer_slashings = RwLock::new( - self.proposer_slashings() + self.proposer_slashings()? .iter() .cloned() - .map(|slashing| (slashing.signed_header_1.message.proposer_index, slashing)) + .map(|slashing| (slashing.as_inner().proposer_index(), slashing)) .collect(), ); let voluntary_exits = RwLock::new( - self.voluntary_exits() + self.voluntary_exits()? 
.iter() .cloned() - .map(|exit| (exit.message.validator_index, exit)) + .map(|exit| (exit.as_inner().message.validator_index, exit)) .collect(), ); - let op_pool = match self { - PersistedOperationPool::Altair(_) => { - let sync_contributions = - RwLock::new(self.sync_contributions()?.iter().cloned().collect()); - - OperationPool { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - reward_cache: Default::default(), - _phantom: Default::default(), + let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect()); + let attestations = match self { + PersistedOperationPool::V5(_) => return Err(OpPoolError::IncorrectOpPoolVariant), + PersistedOperationPool::V12(pool) => { + let mut map = AttestationMap::default(); + for (att, attesting_indices) in pool.attestations { + map.insert(att, attesting_indices); } + RwLock::new(map) } }; + let op_pool = OperationPool { + attestations, + sync_contributions, + attester_slashings, + proposer_slashings, + voluntary_exits, + reward_cache: Default::default(), + _phantom: Default::default(), + }; Ok(op_pool) } } -/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::Altair`. +impl StoreItem for PersistedOperationPoolV5 { + fn db_column() -> DBColumn { + DBColumn::OpPool + } + + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + PersistedOperationPoolV5::from_ssz_bytes(bytes).map_err(Into::into) + } +} + +/// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`. impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { DBColumn::OpPool @@ -142,9 +174,9 @@ impl StoreItem for PersistedOperationPool { } fn from_store_bytes(bytes: &[u8]) -> Result { - // Default deserialization to the Altair variant. 
- PersistedOperationPoolAltair::from_ssz_bytes(bytes) - .map(Self::Altair) + // Default deserialization to the latest variant. + PersistedOperationPoolV12::from_ssz_bytes(bytes) + .map(Self::V12) .map_err(Into::into) } } diff --git a/beacon_node/operation_pool/src/reward_cache.rs b/beacon_node/operation_pool/src/reward_cache.rs index a2ec77c389..5b9d4258e9 100644 --- a/beacon_node/operation_pool/src/reward_cache.rs +++ b/beacon_node/operation_pool/src/reward_cache.rs @@ -1,57 +1,45 @@ use crate::OpPoolError; -use std::collections::HashMap; +use bitvec::vec::BitVec; use types::{BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, ParticipationFlags}; -#[derive(Debug, Clone)] +#[derive(Debug, PartialEq, Eq, Clone)] struct Initialization { current_epoch: Epoch, - prev_epoch_last_block_root: Hash256, latest_block_root: Hash256, } -/// Cache to store validator effective balances and base rewards for block proposal. +/// Cache to store pre-computed information for block proposal. #[derive(Debug, Clone, Default)] pub struct RewardCache { initialization: Option, - /// Map from validator index to `effective_balance`. - effective_balances: HashMap, - /// Map from validator index to participation flags for the previous epoch. + /// `BitVec` of validator indices which don't have default participation flags for the prev. epoch. /// - /// Validators with non-zero participation for the previous epoch are omitted from this map - /// in order to keep its memory-usage as small as possible. - /// - // FIXME(sproul): choose between handling slashable attestations (keep all non-complete) and - // memory efficiency (keep all zero). - // FIXME(sproul): choose whether to filter inactive validators - previous_epoch_participation: HashMap, - /// Map from validator index to participation flags for the current epoch. - /// - /// Validators with complete participation for the current epoch are omitted from this map - /// in order to keep its memory-usage as small as possible. 
- current_epoch_participation: HashMap, + /// We choose to only track whether validators have *any* participation flag set because + /// it's impossible to include a new attestation which is better than the existing participation + /// UNLESS the validator makes a slashable attestation, and we assume that this is rare enough + /// that it's acceptable to be slightly sub-optimal in this case. + previous_epoch_participation: BitVec, + /// `BitVec` of validator indices which don't have default participation flags for the current epoch. + current_epoch_participation: BitVec, } impl RewardCache { - pub fn get_effective_balance(&self, validator_index: usize) -> Option { - self.effective_balances.get(&validator_index).copied() - } - - pub fn get_epoch_participation( + pub fn has_attested_in_epoch( &self, - validator_index: usize, + validator_index: u64, epoch: Epoch, - ) -> Result, OpPoolError> { + ) -> Result { if let Some(init) = &self.initialization { if init.current_epoch == epoch { - Ok(self + Ok(*self .current_epoch_participation - .get(&validator_index) - .copied()) + .get(validator_index as usize) + .ok_or(OpPoolError::RewardCacheOutOfBounds)?) } else if init.current_epoch == epoch + 1 { - Ok(self + Ok(*self .previous_epoch_participation - .get(&validator_index) - .copied()) + .get(validator_index as usize) + .ok_or(OpPoolError::RewardCacheOutOfBounds)?) } else { Err(OpPoolError::RewardCacheWrongEpoch) } @@ -60,60 +48,52 @@ impl RewardCache { } } + /// Return the root of the latest block applied to `state`. + /// + /// For simplicity at genesis we return the zero hash, which will cause one unnecessary + /// re-calculation in `update`. + fn latest_block_root(state: &BeaconState) -> Result { + if state.slot() == 0 { + Ok(Hash256::zero()) + } else { + Ok(*state + .get_block_root(state.slot() - 1) + .map_err(OpPoolError::RewardCacheGetBlockRoot)?) + } + } + /// Update the cache. 
pub fn update(&mut self, state: &BeaconState) -> Result<(), OpPoolError> { - let current_epoch = state.current_epoch(); - let prev_epoch_last_block_root = *state - .get_block_root(state.previous_epoch().start_slot(E::slots_per_epoch())) - .map_err(OpPoolError::RewardCacheGetBlockRoot)?; - let latest_block_root = *state - .get_block_root(state.slot() - 1) - .map_err(OpPoolError::RewardCacheGetBlockRoot)?; - - // If the `state` is from a new epoch or a different fork with a different last epoch block, - // then update the effective balance cache (the effective balances are liable to have - // changed at the epoch boundary). - // - // Similarly, update the previous epoch participation cache as previous epoch participation - // is now fixed. - if self.initialization.as_ref().map_or(true, |init| { - init.current_epoch != current_epoch - || init.prev_epoch_last_block_root != prev_epoch_last_block_root - }) { - self.update_effective_balances(state); - self.update_previous_epoch_participation(state) - .map_err(OpPoolError::RewardCacheUpdatePrevEpoch)?; + if matches!(state, BeaconState::Base(_)) { + return Ok(()); } - // The current epoch participation flags change every block, and will almost always need - // updating when this function is called at a new slot. + let current_epoch = state.current_epoch(); + let latest_block_root = Self::latest_block_root(state)?; + + let new_init = Initialization { + current_epoch, + latest_block_root, + }; + + // The participation flags change every block, and will almost always need updating when + // this function is called at a new slot. 
if self .initialization .as_ref() - .map_or(true, |init| init.latest_block_root != latest_block_root) + .map_or(true, |init| *init != new_init) { + self.update_previous_epoch_participation(state) + .map_err(OpPoolError::RewardCacheUpdatePrevEpoch)?; self.update_current_epoch_participation(state) .map_err(OpPoolError::RewardCacheUpdateCurrEpoch)?; + + self.initialization = Some(new_init); } - self.initialization = Some(Initialization { - current_epoch, - prev_epoch_last_block_root, - latest_block_root, - }); - Ok(()) } - fn update_effective_balances(&mut self, state: &BeaconState) { - self.effective_balances = state - .validators() - .iter() - .enumerate() - .map(|(i, val)| (i, val.effective_balance)) - .collect(); - } - fn update_previous_epoch_participation( &mut self, state: &BeaconState, @@ -122,9 +102,7 @@ impl RewardCache { self.previous_epoch_participation = state .previous_epoch_participation()? .iter() - .copied() - .enumerate() - .filter(|(_, participation)| *participation == default_participation) + .map(|participation| *participation != default_participation) .collect(); Ok(()) } @@ -137,9 +115,7 @@ impl RewardCache { self.current_epoch_participation = state .current_epoch_participation()? .iter() - .copied() - .enumerate() - .filter(|(_, participation)| *participation == default_participation) + .map(|participation| *participation != default_participation) .collect(); Ok(()) } diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index bc03d003ed..5691e9a979 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1,4 +1,5 @@ use clap::{App, Arg}; +use strum::VariantNames; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new("beacon_node") @@ -148,7 +149,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { If a DNS address is provided, the enr-address is set to the IP address it resolves to and \ does not auto-update based on PONG responses in discovery. 
\ Set this only if you are sure other nodes can connect to your local node on this address. \ - Discovery will automatically find your external address,if possible.") + Discovery will automatically find your external address, if possible.") .requires("enr-udp-port") .takes_value(true), ) @@ -229,8 +230,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http-disable-legacy-spec") .long("http-disable-legacy-spec") - .help("Disable serving of legacy data on the /config/spec endpoint. May be \ - disabled by default in a future release.") + .hidden(true) + ) + .arg( + Arg::with_name("http-spec-fork") + .long("http-spec-fork") + .value_name("FORK") + .help("Serve the spec for a specific hard fork on /eth/v1/config/spec. It should \ + not be necessary to set this flag.") + .takes_value(true) ) .arg( Arg::with_name("http-enable-tls") @@ -312,6 +320,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { and never provide an untrusted URL.") .takes_value(true), ) + .arg( + Arg::with_name("monitoring-endpoint-period") + .long("monitoring-endpoint-period") + .value_name("SECONDS") + .help("Defines how many seconds to wait between each message sent to \ + the monitoring-endpoint. Default: 60s") + .requires("monitoring-endpoint") + .takes_value(true), + ) /* * Standard staking flags @@ -320,9 +337,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("staking") .long("staking") - .help("Standard option for a staking beacon node. Equivalent to \ - `lighthouse bn --http --eth1 `. This will enable the http server on localhost:5052 \ - and try connecting to an eth1 node on localhost:8545") + .help("Standard option for a staking beacon node. This will enable the HTTP server \ + on localhost:5052 and import deposit logs from the execution node. 
This is \ + equivalent to `--http` on merge-ready networks, or `--http --eth1` pre-merge") .takes_value(false) ) @@ -377,72 +394,87 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("1000") .takes_value(true) ) + .arg( + Arg::with_name("eth1-cache-follow-distance") + .long("eth1-cache-follow-distance") + .value_name("BLOCKS") + .help("Specifies the distance between the Eth1 chain head and the last block which \ + should be imported into the cache. Setting this value lower can help \ + compensate for irregular Proof-of-Work block times, but setting it too low \ + can make the node vulnerable to re-orgs.") + .takes_value(true) + ) /* * Execution Layer Integration */ .arg( Arg::with_name("merge") .long("merge") - .help("Enable the features necessary to run merge testnets. This feature \ - is unstable and is for developers only.") - .takes_value(false), + .help("Deprecated. The feature activates automatically when --execution-endpoint \ + is supplied.") + .takes_value(false) + .hidden(true) ) .arg( - Arg::with_name("execution-endpoints") - .long("execution-endpoints") - .value_name("EXECUTION-ENDPOINTS") - .help("One or more comma-delimited server endpoints for HTTP JSON-RPC connection. \ - If multiple endpoints are given the endpoints are used as fallback in the \ - given order. Also enables the --merge flag. \ - If this flag is omitted and the --eth1-endpoints is supplied, those values \ - will be used. Defaults to http://127.0.0.1:8545.") + Arg::with_name("execution-endpoint") + .long("execution-endpoint") + .value_name("EXECUTION-ENDPOINT") + .alias("execution-endpoints") + .help("Server endpoint for an execution layer JWT-authenticated HTTP \ + JSON-RPC connection. 
Uses the same endpoint to populate the \ + deposit cache.") + .takes_value(true) + .requires("execution-jwt") + ) + .arg( + Arg::with_name("execution-jwt") + .long("execution-jwt") + .value_name("EXECUTION-JWT") + .alias("jwt-secrets") + .help("File path which contains the hex-encoded JWT secret for the \ + execution endpoint provided in the --execution-endpoint flag.") + .requires("execution-endpoint") .takes_value(true) ) .arg( - Arg::with_name("jwt-secrets") - .long("jwt-secrets") - .value_name("JWT-SECRETS") - .help("One or more comma-delimited file paths which contain the corresponding hex-encoded \ - JWT secrets for each execution endpoint provided in the --execution-endpoints flag. \ - The number of paths should be in the same order and strictly equal to the number \ - of execution endpoints provided.") - .takes_value(true) - .requires("execution-endpoints") - ) - .arg( - Arg::with_name("jwt-id") - .long("jwt-id") - .value_name("JWT-ID") + Arg::with_name("execution-jwt-id") + .long("execution-jwt-id") + .value_name("EXECUTION-JWT-ID") + .alias("jwt-id") .help("Used by the beacon node to communicate a unique identifier to execution nodes \ during JWT authentication. It corresponds to the 'id' field in the JWT claims object.\ - Set to empty by deafult") + Set to empty by default") + .requires("execution-jwt") .takes_value(true) ) .arg( - Arg::with_name("jwt-version") - .long("jwt-version") - .value_name("JWT-VERSION") + Arg::with_name("execution-jwt-version") + .long("execution-jwt-version") + .value_name("EXECUTION-JWT-VERSION") + .alias("jwt-version") .help("Used by the beacon node to communicate a client version to execution nodes \ during JWT authentication. 
It corresponds to the 'clv' field in the JWT claims object.\ - Set to empty by deafult") + Set to empty by default") + .requires("execution-jwt") .takes_value(true) ) .arg( Arg::with_name("suggested-fee-recipient") .long("suggested-fee-recipient") .value_name("SUGGESTED-FEE-RECIPIENT") - .help("Once the merge has happened, this address will receive transaction fees \ - collected from any blocks produced by this node. Defaults to a junk \ - address whilst the merge is in development stages. THE DEFAULT VALUE \ - WILL BE REMOVED BEFORE THE MERGE ENTERS PRODUCTION") - .requires("merge") + .help("Emergency fallback fee recipient for use in case the validator client does \ + not have one configured. You should set this flag on the validator \ + client instead of (or in addition to) setting it here.") + .requires("execution-endpoint") .takes_value(true) ) .arg( - Arg::with_name("payload-builders") - .long("payload-builders") + Arg::with_name("builder") + .long("builder") + .alias("payload-builder") + .alias("payload-builders") .help("The URL of a service compatible with the MEV-boost API.") - .requires("merge") + .requires("execution-endpoint") .takes_value(true) ) @@ -623,6 +655,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { [disabled by default].") .requires("slasher") ) + .arg( + Arg::with_name("slasher-backend") + .long("slasher-backend") + .value_name("DATABASE") + .help("Set the database backend to be used by the slasher.") + .takes_value(true) + .possible_values(slasher::DatabaseBackend::VARIANTS) + .requires("slasher") + ) .arg( Arg::with_name("wss-checkpoint") .long("wss-checkpoint") @@ -709,4 +750,94 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("250") .takes_value(true) ) + .arg( + Arg::with_name("paranoid-block-proposal") + .long("paranoid-block-proposal") + .help("Paranoid enough to be reading the source? Nice. 
This flag reverts some \ + block proposal optimisations and forces the node to check every attestation \ + it includes super thoroughly. This may be useful in an emergency, but not \ + otherwise.") + .hidden(true) + .takes_value(false) + ) + .arg( + Arg::with_name("builder-fallback-skips") + .long("builder-fallback-skips") + .help("If this node is proposing a block and has seen this number of skip slots \ + on the canonical chain in a row, it will NOT query any connected builders, \ + and will use the local execution engine for payload construction.") + .default_value("3") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-skips-per-epoch") + .long("builder-fallback-skips-per-epoch") + .help("If this node is proposing a block and has seen this number of skip slots \ + on the canonical chain in the past `SLOTS_PER_EPOCH`, it will NOT query \ + any connected builders, and will use the local execution engine for \ + payload construction.") + .default_value("8") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-epochs-since-finalization") + .long("builder-fallback-epochs-since-finalization") + .help("If this node is proposing a block and the chain has not finalized within \ + this number of epochs, it will NOT query any connected builders, \ + and will use the local execution engine for payload construction. Setting \ + this value to anything less than 2 will cause the node to NEVER query \ + connected builders. Setting it to 2 will cause this condition to be hit \ + if there are skips slots at the start of an epoch, right before this node \ + is set to propose.") + .default_value("3") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-disable-checks") + .long("builder-fallback-disable-checks") + .help("This flag disables all checks related to chain health. 
This means the builder \ + API will always be used for payload construction, regardless of recent chain \ + conditions.") + .takes_value(false) + ) + .arg( + Arg::with_name("builder-profit-threshold") + .long("builder-profit-threshold") + .value_name("WEI_VALUE") + .help("The minimum reward in wei provided to the proposer by a block builder for \ + an external payload to be considered for inclusion in a proposal. If this \ + threshold is not met, the local EE's payload will be used. This is currently \ + *NOT* in comparison to the value of the local EE's payload. It simply checks \ + whether the total proposer reward from an external payload is equal to or \ + greater than this value. In the future, a comparison to a local payload is \ + likely to be added. Example: Use 250000000000000000 to set the threshold to \ + 0.25 ETH.") + .default_value("0") + .takes_value(true) + ) + .arg( + Arg::with_name("count-unrealized") + .long("count-unrealized") + .hidden(true) + .help("Enables an alternative, potentially more performant FFG \ + vote tracking method.") + .takes_value(true) + .default_value("true") + ) + .arg( + Arg::with_name("count-unrealized-full") + .long("count-unrealized-full") + .hidden(true) + .help("Stricter version of `count-unrealized`.") + .takes_value(true) + .default_value("false") + ) + .arg( + Arg::with_name("reset-payload-statuses") + .long("reset-payload-statuses") + .help("When present, Lighthouse will forget the payload statuses of any \ + already-imported blocks. 
This can assist in the recovery from a consensus \ + failure caused by the execution layer.") + .takes_value(false) + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index ca266829de..40ef850eda 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -3,12 +3,15 @@ use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; +use genesis::Eth1Endpoint; use http_api::TlsConfig; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; use slog::{info, warn, Logger}; use std::cmp; use std::cmp::max; +use std::fmt::Debug; +use std::fmt::Write; use std::fs; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; use std::path::{Path, PathBuf}; @@ -113,7 +116,14 @@ pub fn get_config( } if cli_args.is_present("http-disable-legacy-spec") { - client_config.http_api.serve_legacy_spec = false; + warn!( + log, + "The flag --http-disable-legacy-spec is deprecated and will be removed" + ); + } + + if let Some(fork_name) = clap_utils::parse_optional(cli_args, "http-spec-fork")? 
{ + client_config.http_api.spec_fork_name = Some(fork_name); } if cli_args.is_present("http-enable-tls") { @@ -168,9 +178,13 @@ pub fn get_config( * Explorer metrics */ if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + let update_period_secs = + clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; + client_config.monitoring_api = Some(monitoring_api::Config { db_path: None, freezer_db_path: None, + update_period_secs, monitoring_endpoint: monitoring_endpoint.to_string(), }); } @@ -215,15 +229,18 @@ pub fn get_config( "msg" => "please use --eth1-endpoints instead" ); client_config.sync_eth1_chain = true; - client_config.eth1.endpoints = vec![SensitiveUrl::parse(endpoint) + + let endpoints = vec![SensitiveUrl::parse(endpoint) .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?]; + client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); } else if let Some(endpoints) = cli_args.value_of("eth1-endpoints") { client_config.sync_eth1_chain = true; - client_config.eth1.endpoints = endpoints + let endpoints = endpoints .split(',') .map(SensitiveUrl::parse) .collect::>() .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?; + client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints); } if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { @@ -236,47 +253,87 @@ pub fn get_config( client_config.eth1.purge_cache = true; } - if cli_args.is_present("merge") || cli_args.is_present("execution-endpoints") { + if let Some(follow_distance) = + clap_utils::parse_optional(cli_args, "eth1-cache-follow-distance")? + { + client_config.eth1.cache_follow_distance = Some(follow_distance); + } + + if cli_args.is_present("merge") { + if cli_args.is_present("execution-endpoint") { + warn!( + log, + "The --merge flag is deprecated"; + "info" => "the --execution-endpoint flag automatically enables this feature" + ) + } else { + return Err("The --merge flag is deprecated. 
\ + Supply a value to --execution-endpoint instead." + .into()); + } + } + + if let Some(endpoints) = cli_args.value_of("execution-endpoint") { let mut el_config = execution_layer::Config::default(); - if let Some(endpoints) = cli_args.value_of("execution-endpoints") { - client_config.sync_eth1_chain = true; - el_config.execution_endpoints = endpoints - .split(',') - .map(SensitiveUrl::parse) - .collect::>() - .map_err(|e| format!("execution-endpoints contains an invalid URL {:?}", e))?; - } else if cli_args.is_present("merge") { - el_config.execution_endpoints = client_config.eth1.endpoints.clone(); - } - - if let Some(endpoints) = cli_args.value_of("payload-builders") { - el_config.builder_endpoints = endpoints - .split(',') - .map(SensitiveUrl::parse) - .collect::>() - .map_err(|e| format!("payload-builders contains an invalid URL {:?}", e))?; - } - - if let Some(secrets) = cli_args.value_of("jwt-secrets") { - let secret_files: Vec<_> = secrets.split(',').map(PathBuf::from).collect(); - if !secret_files.is_empty() && secret_files.len() != el_config.execution_endpoints.len() - { - return Err(format!( - "{} execution-endpoints supplied with {} jwt-secrets. Lengths \ - must match or jwt-secrets must be empty.", - el_config.execution_endpoints.len(), - secret_files.len(), - )); - } - el_config.secret_files = secret_files; + // Always follow the deposit contract when there is an execution endpoint. + // + // This is wasteful for non-staking nodes as they have no need to process deposit contract + // logs and build an "eth1" cache. The alternative is to explicitly require the `--eth1` or + // `--staking` flags, however that poses a risk to stakers since they cannot produce blocks + // without "eth1". + // + // The waste for non-staking nodes is relatively small so we err on the side of safety for + // stakers. The merge is already complicated enough. 
+ client_config.sync_eth1_chain = true; + + // Parse a single execution endpoint, logging warnings if multiple endpoints are supplied. + let execution_endpoint = + parse_only_one_value(endpoints, SensitiveUrl::parse, "--execution-endpoint", log)?; + + // Parse a single JWT secret, logging warnings if multiple are supplied. + // + // JWTs are required if `--execution-endpoint` is supplied. + let secret_files: String = clap_utils::parse_required(cli_args, "execution-jwt")?; + let secret_file = + parse_only_one_value(&secret_files, PathBuf::from_str, "--execution-jwt", log)?; + + // Parse and set the payload builder, if any. + if let Some(endpoint) = cli_args.value_of("builder") { + let payload_builder = + parse_only_one_value(endpoint, SensitiveUrl::parse, "--builder", log)?; + el_config.builder_url = Some(payload_builder); } + // Set config values from parse values. + el_config.secret_files = vec![secret_file.clone()]; + el_config.execution_endpoints = vec![execution_endpoint.clone()]; el_config.suggested_fee_recipient = clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?; - el_config.jwt_id = clap_utils::parse_optional(cli_args, "jwt-id")?; - el_config.jwt_version = clap_utils::parse_optional(cli_args, "jwt-version")?; + el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?; + el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?; el_config.default_datadir = client_config.data_dir.clone(); + el_config.builder_profit_threshold = + clap_utils::parse_required(cli_args, "builder-profit-threshold")?; + + // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and + // use `--execution-endpoint` instead. Also, log a deprecation warning. + if cli_args.is_present("eth1-endpoints") || cli_args.is_present("eth1-endpoint") { + warn!( + log, + "Ignoring --eth1-endpoints flag"; + "info" => "the value for --execution-endpoint will be used instead. 
\ + --eth1-endpoints has been deprecated for post-merge configurations" + ); + } + client_config.eth1.endpoints = Eth1Endpoint::Auth { + endpoint: execution_endpoint, + jwt_path: secret_file, + jwt_id: el_config.jwt_id.clone(), + jwt_version: el_config.jwt_version.clone(), + }; + + // Store the EL config in the client config. client_config.execution_layer = Some(el_config); } @@ -342,7 +399,6 @@ pub fn get_config( client_config.eth1.follow_distance = spec.eth1_follow_distance; client_config.eth1.node_far_behind_seconds = max(5, spec.eth1_follow_distance / 2) * spec.seconds_per_eth1_block; - client_config.eth1.network_id = spec.deposit_network_id.into(); client_config.eth1.chain_id = spec.deposit_chain_id.into(); client_config.eth1.set_block_cache_truncation::(spec); @@ -545,6 +601,10 @@ pub fn get_config( slasher_config.broadcast = cli_args.is_present("slasher-broadcast"); + if let Some(backend) = clap_utils::parse_optional(cli_args, "slasher-backend")? { + slasher_config.backend = backend; + } + client_config.slasher = Some(slasher_config); } @@ -591,6 +651,30 @@ pub fn get_config( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } + client_config.chain.count_unrealized = + clap_utils::parse_required(cli_args, "count-unrealized")?; + client_config.chain.count_unrealized_full = + clap_utils::parse_required::(cli_args, "count-unrealized-full")?.into(); + + client_config.chain.always_reset_payload_statuses = + cli_args.is_present("reset-payload-statuses"); + + client_config.chain.paranoid_block_proposal = cli_args.is_present("paranoid-block-proposal"); + + /* + * Builder fallback configs. 
+ */ + client_config.chain.builder_fallback_skips = + clap_utils::parse_required(cli_args, "builder-fallback-skips")?; + client_config.chain.builder_fallback_skips_per_epoch = + clap_utils::parse_required(cli_args, "builder-fallback-skips-per-epoch")?; + client_config + .chain + .builder_fallback_epochs_since_finalization = + clap_utils::parse_required(cli_args, "builder-fallback-epochs-since-finalization")?; + client_config.chain.builder_fallback_disable_checks = + cli_args.is_present("builder-fallback-disable-checks"); + Ok(client_config) } @@ -746,7 +830,8 @@ pub fn set_network_config( None }) { - addr.push_str(&format!(":{}", enr_udp_port)); + write!(addr, ":{}", enr_udp_port) + .map_err(|e| format!("Failed to write enr address {}", e))?; } else { return Err( "enr-udp-port must be set for node to be discoverable with dns address" @@ -842,3 +927,38 @@ pub fn get_slots_per_restore_point( Ok((default, false)) } } + +/// Parses the `cli_value` as a comma-separated string of values to be parsed with `parser`. +/// +/// If there is more than one value, log a warning. If there are no values, return an error. 
+pub fn parse_only_one_value( + cli_value: &str, + parser: F, + flag_name: &str, + log: &Logger, +) -> Result +where + F: Fn(&str) -> Result, + E: Debug, +{ + let values = cli_value + .split(',') + .map(parser) + .collect::, _>>() + .map_err(|e| format!("{} contains an invalid value {:?}", flag_name, e))?; + + if values.len() > 1 { + warn!( + log, + "Multiple values provided"; + "info" => "multiple values are deprecated, only the first value will be used", + "count" => values.len(), + "flag" => flag_name + ); + } + + values + .into_iter() + .next() + .ok_or(format!("Must provide at least one value to {}", flag_name)) +} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 4b85daffc0..4f56956b8b 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -636,7 +636,11 @@ impl, Cold: ItemStore> HotColdDB for op in batch { match op { StoreOp::PutBlock(block_root, block) => { - self.block_as_kv_store_ops(&block_root, *block, &mut key_value_batch)?; + self.block_as_kv_store_ops( + &block_root, + block.as_ref().clone(), + &mut key_value_batch, + )?; } StoreOp::PutState(state_root, state) => { @@ -1522,7 +1526,7 @@ impl, Cold: ItemStore> HotColdDB } /// Load a frozen state's slot, given its root. - fn load_cold_state_slot(&self, state_root: &Hash256) -> Result, Error> { + pub fn load_cold_state_slot(&self, state_root: &Hash256) -> Result, Error> { Ok(self .cold_db .get(state_root)? @@ -1808,6 +1812,7 @@ impl StoreItem for Split { /// Struct for summarising a state in the hot database. /// /// Allows full reconstruction by replaying blocks. 
+// FIXME(sproul): change to V20 #[superstruct( variants(V1, V10), variant_attributes(derive(Debug, Clone, Copy, Default, Encode, Decode)), diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 01a6a1b145..6d45b59544 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -212,7 +212,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, (Err(BeaconStateError::SlotOutOfBounds), Err(BeaconStateError::SlotOutOfBounds)) => { // Read a `BeaconState` from the store that has access to prior historical roots. if let Some(beacon_state) = - next_historical_root_backtrack_state(&*self.store, &self.beacon_state) + next_historical_root_backtrack_state(self.store, &self.beacon_state) .handle_unavailable()? { self.beacon_state = Cow::Owned(beacon_state); diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index e3f8b31e1d..0ddbf1abb7 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -42,6 +42,7 @@ pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; pub use metrics::scrape_for_metrics; use parking_lot::MutexGuard; +use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; pub use types::*; @@ -155,7 +156,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Reified key-value storage operation. Helps in modifying the storage atomically. /// See also https://github.com/sigp/lighthouse/issues/692 pub enum StoreOp<'a, E: EthSpec> { - PutBlock(Hash256, Box>), + PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), PutStateTemporaryFlag(Hash256), DeleteStateTemporaryFlag(Hash256), @@ -213,6 +214,9 @@ pub enum DBColumn { BeaconRandaoMixes, #[strum(serialize = "dht")] DhtEnrs, + /// For Optimistically Imported Merge Transition Blocks + #[strum(serialize = "otb")] + OptimisticTransitionBlock, } /// A block from the database, which might have an execution payload or not. 
diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 3ff39c67f7..1473f59a4e 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,14 +1,17 @@ use super::{Error, ItemStore, KeyValueStore, KeyValueStoreOp}; +use crate::{ColumnIter, DBColumn}; use parking_lot::{Mutex, MutexGuard, RwLock}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use types::*; type DBHashMap = HashMap, Vec>; +type DBKeyMap = HashMap, HashSet>>; /// A thread-safe `HashMap` wrapper. pub struct MemoryStore { db: RwLock, + col_keys: RwLock, transaction_mutex: Mutex<()>, _phantom: PhantomData, } @@ -18,6 +21,7 @@ impl MemoryStore { pub fn open() -> Self { Self { db: RwLock::new(HashMap::new()), + col_keys: RwLock::new(HashMap::new()), transaction_mutex: Mutex::new(()), _phantom: PhantomData, } @@ -41,6 +45,11 @@ impl KeyValueStore for MemoryStore { fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db.write().insert(column_key, val.to_vec()); + self.col_keys + .write() + .entry(col.as_bytes().to_vec()) + .or_insert_with(HashSet::new) + .insert(key.to_vec()); Ok(()) } @@ -63,6 +72,10 @@ impl KeyValueStore for MemoryStore { fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db.write().remove(&column_key); + self.col_keys + .write() + .get_mut(&col.as_bytes().to_vec()) + .map(|set| set.remove(key)); Ok(()) } @@ -81,6 +94,26 @@ impl KeyValueStore for MemoryStore { Ok(()) } + // pub type ColumnIter<'a> = Box), Error>> + 'a>; + fn iter_column(&self, column: DBColumn) -> ColumnIter { + let col = column.as_str(); + if let Some(keys) = self + .col_keys + .read() + .get(col.as_bytes()) + .map(|set| set.iter().cloned().collect::>()) + { + Box::new(keys.into_iter().filter_map(move |key| { + let hash = 
Hash256::from_slice(&key); + self.get_bytes(col, &key) + .transpose() + .map(|res| res.map(|bytes| (hash, bytes))) + })) + } else { + Box::new(std::iter::empty()) + } + } + fn begin_rw_transaction(&self) -> MutexGuard<()> { self.transaction_mutex.lock() } diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index 9c6bf1ca87..944846c863 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -3,56 +3,34 @@ //! This service allows task execution on the beacon node for various functionality. use beacon_chain::{BeaconChain, BeaconChainTypes}; -use slog::{debug, info, warn}; +use slog::{info, warn}; use slot_clock::SlotClock; use std::sync::Arc; -use std::time::Duration; -use tokio::time::{interval_at, Instant}; +use tokio::time::sleep; /// Spawns a timer service which periodically executes tasks for the beacon chain pub fn spawn_timer( executor: task_executor::TaskExecutor, beacon_chain: Arc>, - seconds_per_slot: u64, ) -> Result<(), &'static str> { - let log = executor.log(); - let start_instant = Instant::now() - + beacon_chain - .slot_clock - .duration_to_next_slot() - .ok_or("slot_notifier unable to determine time to next slot")?; - - // Warning: `interval_at` panics if `seconds_per_slot` = 0. 
- let mut interval = interval_at(start_instant, Duration::from_secs(seconds_per_slot)); - let per_slot_executor = executor.clone(); + let log = executor.log().clone(); let timer_future = async move { - let log = per_slot_executor.log().clone(); loop { - interval.tick().await; - let chain = beacon_chain.clone(); - if let Some(handle) = per_slot_executor - .spawn_blocking_handle(move || chain.per_slot_task(), "timer_per_slot_task") - { - if let Err(e) = handle.await { - warn!( - log, - "Per slot task failed"; - "info" => ?e - ); + let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() { + Some(duration) => duration, + None => { + warn!(log, "Unable to determine duration to next slot"); + return; } - } else { - debug!( - log, - "Per slot task timer stopped"; - "info" => "shutting down" - ); - break; - } + }; + + sleep(duration_to_next_slot).await; + beacon_chain.per_slot_task().await; } }; executor.spawn(timer_future, "timer"); - info!(log, "Timer service started"); + info!(executor.log(), "Timer service started"); Ok(()) } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 871b2c4ba8..d05677465b 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -3,6 +3,7 @@ * [Introduction](./intro.md) * [Become a Validator](./mainnet-validator.md) * [Become a Testnet Validator](./testnet-validator.md) +* [Merge Migration](./merge-migration.md) * [Installation](./installation.md) * [System Requirements](./system-requirements.md) * [Pre-Built Binaries](./installation-binaries.md) @@ -17,21 +18,21 @@ * [Create a validator](./validator-create.md) * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) - * [Importing from the Staking Launchpad](./validator-import-launchpad.md) + * [Importing from the Staking Launchpad](./validator-import-launchpad.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) * [Validator Monitoring](./validator-monitoring.md) * [Doppelganger 
Protection](./validator-doppelganger.md) * [Suggested Fee Recipient](./suggested-fee-recipient.md) * [APIs](./api.md) - * [Beacon Node API](./api-bn.md) - * [/lighthouse](./api-lighthouse.md) - * [Validator Inclusion APIs](./validator-inclusion.md) - * [Validator Client API](./api-vc.md) - * [Endpoints](./api-vc-endpoints.md) - * [Authorization Header](./api-vc-auth-header.md) - * [Signature Header](./api-vc-sig-header.md) - * [Prometheus Metrics](./advanced_metrics.md) + * [Beacon Node API](./api-bn.md) + * [/lighthouse](./api-lighthouse.md) + * [Validator Inclusion APIs](./validator-inclusion.md) + * [Validator Client API](./api-vc.md) + * [Endpoints](./api-vc-endpoints.md) + * [Authorization Header](./api-vc-auth-header.md) + * [Signature Header](./api-vc-sig-header.md) + * [Prometheus Metrics](./advanced_metrics.md) * [Advanced Usage](./advanced.md) * [Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) @@ -43,6 +44,8 @@ * [Running a Slasher](./slasher.md) * [Redundancy](./redundancy.md) * [Pre-Releases](./advanced-pre-releases.md) + * [Release Candidates](./advanced-release-candidates.md) + * [MEV and Lighthouse](./builders.md) * [Contributing](./contributing.md) - * [Development Environment](./setup.md) + * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/advanced-datadir.md b/book/src/advanced-datadir.md index 9f81112bdd..074857346e 100644 --- a/book/src/advanced-datadir.md +++ b/book/src/advanced-datadir.md @@ -55,5 +55,5 @@ In this case, the user could solve this warn by following these steps: 1. Restarting the BN process Although there are no known issues with using backwards compatibility functionality, having split -directories is likely to cause confusion for users. Therefore, we recommend affected users migrate +directories is likely to cause confusion for users. Therefore, we recommend that affected users migrate to a consolidated directory structure. 
diff --git a/book/src/advanced-pre-releases.md b/book/src/advanced-pre-releases.md index b90bd631d4..f3f4a52304 100644 --- a/book/src/advanced-pre-releases.md +++ b/book/src/advanced-pre-releases.md @@ -1,4 +1,4 @@ # Pre-Releases -Pre-releases are now referred to as [Release Candidates][./advanced-pre-releases.md]. The terms may +Pre-releases are now referred to as [Release Candidates](./advanced-release-candidates.md). The terms may be used interchangeably. diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 178936cf61..397d9a28b5 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -23,11 +23,11 @@ states to slow down dramatically. A lower _slots per restore point_ value (SPRP) frequent restore points, while a higher SPRP corresponds to less frequent. The table below shows some example values. -| Use Case | SPRP | Yearly Disk Usage | Load Historical State | -| ---------------------- | -------------- | ----------------- | --------------------- | -| Block explorer/analysis | 32 | 1.4 TB | 155 ms | -| Hobbyist (prev. default) | 2048 | 23.1 GB | 10.2 s | -| Validator only (default) | 8192 | 5.7 GB | 41 s | +| Use Case | SPRP | Yearly Disk Usage | Load Historical State | +|--------------------------|------|-------------------|-----------------------| +| Block explorer/analysis | 32 | 1.4 TB | 155 ms | +| Hobbyist (prev. default) | 2048 | 23.1 GB | 10.2 s | +| Validator only (default) | 8192 | 5.7 GB | 41 s | As you can see, it's a high-stakes trade-off! The relationships to disk usage and historical state load time are both linear – doubling SPRP halves disk usage and doubles load time. 
The minimum SPRP diff --git a/book/src/advanced_metrics.md b/book/src/advanced_metrics.md index 0d1aa345bf..3141f336a1 100644 --- a/book/src/advanced_metrics.md +++ b/book/src/advanced_metrics.md @@ -48,3 +48,39 @@ Check to ensure that the metrics are available on the default port: ```bash curl localhost:5064/metrics ``` + +## Remote Monitoring + +Lighthouse has the ability to send a subset of metrics to a remote server for collection. Presently +the main server offering remote monitoring is beaconcha.in. Instructions for setting this up +can be found in beaconcha.in's docs: + +- + +The Lighthouse flag for setting the monitoring URL is `--monitoring-endpoint`. + +When sending metrics to a remote server you should be conscious of security: + +- Only use a monitoring service that you trust: you are sending detailed information about + your validators and beacon node to this service which could be used to track you. +- Always use an HTTPS URL to prevent the traffic being intercepted in transit. + +The specification for the monitoring endpoint can be found here: + +- + +_Note: the similarly named [Validator Monitor](./validator-monitoring.md) feature is entirely +independent of remote metric monitoring_. + +### Update Period + +You can adjust the frequency at which Lighthouse sends metrics to the remote server using the +`--monitoring-endpoint-period` flag. It takes an integer value in seconds, defaulting to 60 +seconds. + +``` +lighthouse bn --monitoring-endpoint-period 60 --monitoring-endpoint "https://url" +``` + +Increasing the monitoring period between can be useful if you are running into rate limits when +posting large amounts of data for multiple nodes. 
diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 71155a1c23..d6fcb82a6b 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -22,7 +22,7 @@ Having a large peer count means that your node must act as an honest RPC server to all your connected peers. If there are many that are syncing, they will often be requesting a large number of blocks from your node. This means your node must perform a lot of work reading and responding to these peers. If your -node is over-loaded with peers and cannot respond in time, other Lighthouse +node is overloaded with peers and cannot respond in time, other Lighthouse peers will consider you non-performant and disfavour you from their peer stores. Your node will also have to handle and manage the gossip and extra bandwidth that comes from having these extra peers. Having a non-responsive @@ -63,7 +63,7 @@ settings allow you construct your initial ENR. Their primary intention is for setting up boot-like nodes and having a contactable ENR on boot. On normal operation of a Lighthouse node, none of these flags need to be set. Setting these flags incorrectly can lead to your node being incorrectly added to the -global DHT which will degrades the discovery process for all Ethereum consensus peers. +global DHT which will degrade the discovery process for all Ethereum consensus peers. The ENR of a Lighthouse node is initially set to be non-contactable. The in-built discovery mechanism can determine if your node is publicly accessible, diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index f5c4542b9e..d9c8080b4d 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -453,4 +453,23 @@ Caveats: loading a state on a boundary is most efficient. 
[block_reward_src]: -https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs \ No newline at end of file +https://github.com/sigp/lighthouse/tree/unstable/common/eth2/src/lighthouse/block_rewards.rs + + +### `/lighthouse/merge_readiness` + +```bash +curl -X GET "http://localhost:5052/lighthouse/merge_readiness" +``` + +``` +{ + "data":{ + "type":"ready", + "config":{ + "terminal_total_difficulty":"6400" + }, + "current_difficulty":"4800" + } + } +``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index ae091130f3..9aedf6e249 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -24,12 +24,12 @@ Returns the software version and `git` commit hash for the Lighthouse binary. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/version` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/version` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Response Body @@ -47,12 +47,12 @@ Returns information regarding the health of the host machine. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/health` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/health` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | *Note: this endpoint is presently only available on Linux.* @@ -83,12 +83,12 @@ Returns the Ethereum proof-of-stake consensus specification loaded for this vali ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/spec` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/spec` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Response Body @@ -134,6 +134,7 @@ Typical Responses | 200 "DOMAIN_VOLUNTARY_EXIT": "0x04000000", "DOMAIN_SELECTION_PROOF": "0x05000000", "DOMAIN_AGGREGATE_AND_PROOF": "0x06000000", + "DOMAIN_APPLICATION_MASK": "0x00000001", "MAX_VALIDATORS_PER_COMMITTEE": "2048", "SLOTS_PER_EPOCH": "32", "EPOCHS_PER_ETH1_VOTING_PERIOD": "32", @@ -167,12 +168,12 @@ file may be read by a local user with access rights. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/auth` -Method | GET -Required Headers | - -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------| +| Path | `/lighthouse/auth` | +| Method | GET | +| Required Headers | - | +| Typical Responses | 200 | ### Example Path @@ -194,12 +195,12 @@ Lists all validators managed by this validator client. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Response Body @@ -231,12 +232,12 @@ Get a validator by their `voting_pubkey`. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/:voting_pubkey` -Method | GET -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 400 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/:voting_pubkey` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | ### Example Path @@ -261,12 +262,12 @@ Update some values for the validator with `voting_pubkey`. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/:voting_pubkey` -Method | PATCH -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 400 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/:voting_pubkey` | +| Method | PATCH | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | ### Example Path @@ -300,12 +301,12 @@ Validators are generated from the mnemonic according to ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Request Body @@ -358,12 +359,12 @@ Import a keystore into the validator client. ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/keystore` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/keystore` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Request Body @@ -432,12 +433,12 @@ generated with the path `m/12381/3600/i/42`. 
### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/mnemonic` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/mnemonic` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200 | ### Example Request Body @@ -478,12 +479,12 @@ Create any number of new validators, all of which will refer to a ### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/lighthouse/validators/web3signer` -Method | POST -Required Headers | [`Authorization`](./api-vc-auth-header.md) -Typical Responses | 200, 400 +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/web3signer` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | ### Example Request Body diff --git a/book/src/builders.md b/book/src/builders.md new file mode 100644 index 0000000000..109a75a040 --- /dev/null +++ b/book/src/builders.md @@ -0,0 +1,176 @@ +# MEV and Lighthouse + +Lighthouse is able to interact with servers that implement the [builder +API](https://github.com/ethereum/builder-specs), allowing it to produce blocks without having +knowledge of the transactions included in the block. This enables Lighthouse to outsource the job of +transaction gathering/ordering within a block to parties specialized in this particular task. For +economic reasons, these parties will refuse to reveal the list of transactions to the validator +before the validator has committed to (i.e. signed) the block. A primer on MEV can be found +[here](https://ethereum.org/en/developers/docs/mev). 
+
+Using the builder API is not known to introduce additional slashing risks, however a liveness risk
+(i.e. the ability for the chain to produce valid blocks) is introduced because your node will be
+signing blocks without executing the transactions within the block. Therefore, it won't know whether
+the transactions are valid, and it may sign a block that the network will reject. This would lead to
+a missed proposal and the opportunity cost of lost block rewards.
+
+## How to connect to a builder
+
+The beacon node and validator client each require a new flag for lighthouse to be fully compatible with builder API servers.
+
+```
+lighthouse bn --builder https://mainnet-builder.test
+```
+The `--builder` flag will cause the beacon node to query the provided URL during block production for a block
+payload with stubbed-out transactions. If this request fails, Lighthouse will fall back to the local
+execution engine and produce a block using transactions gathered and verified locally.
+
+The beacon node will *only* query for this type of block (a "blinded" block) when a validator specifically requests it.
+Otherwise, it will continue to serve full blocks as normal. In order to configure the validator client to query for
+blinded blocks, you should use the following flag:
+
+```
+lighthouse vc --builder-proposals
+```
+With the `--builder-proposals` flag, the validator client will ask for blinded blocks for all validators it manages.
+In order to configure whether a validator queries for blinded blocks check out [this section.](#validator-client-configuration)
+
+## Multiple builders
+
+Lighthouse currently only supports a connection to a single builder. If you'd like to connect to multiple builders or
+relays, run one of the following services and configure lighthouse to use it with the `--builder` flag.
+ +* [`mev-boost`][mev-boost] +* [`mev-rs`][mev-rs] + +## Validator Client Configuration + +In the validator client you can configure gas limit and fee recipient on a per-validator basis. If no gas limit is +configured, Lighthouse will use a default gas limit of 30,000,000, which is the current default value used in execution +engines. You can also enable or disable use of external builders on a per-validator basis rather than using +`--builder-proposals`, which enables external builders for all validators. In order to manage these configurations +per-validator, you can either make updates to the `validator_definitions.yml` file or you can use the HTTP requests +described below. + +Both the gas limit and fee recipient will be passed along as suggestions to connected builders. If there is a discrepancy +in either, it will *not* keep you from proposing a block with the builder. This is because the bounds on gas limit are +calculated based on prior execution blocks, so an honest external builder will make sure that even if your +requested gas limit value is out of the specified range, a valid gas limit in the direction of your request will be +used in constructing the block. Depending on the connected relay, payment to the proposer might be in the form of a +transaction within the block to the fee recipient, so a discrepancy in fee recipient might not indicate that there +is something afoot. + +> Note: The gas limit configured here is effectively a vote on block size, so the configuration should not be taken lightly. +> 30,000,000 is currently seen as a value balancing block size with how expensive it is for +> the network to validate blocks. So if you don't feel comfortable making an informed "vote", using the default value is +> encouraged. We will update the default value if the community reaches a rough consensus on a new value. + +### Set Gas Limit via HTTP + +To update gas limit per-validator you can use the [standard key manager API][gas-limit-api]. 
+ +Alternatively, you can use the [lighthouse API](api-vc-endpoints.md). See below for an example. + +### Enable/Disable builder proposals via HTTP + +Use the [lighthouse API](api-vc-endpoints.md) to enable/disable use of the builder API on a per-validator basis. +You can also update the configured gas limit with these requests. + +#### `PATCH /lighthouse/validators/:voting_pubkey` + + +#### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/validators/:voting_pubkey` | +| Method | PATCH | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 400 | + +#### Example Path + +``` +localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde +``` + +#### Example Request Body +Each field is optional. +```json +{ + "builder_proposals": true, + "gas_limit": 30000001 +} +``` + +#### Example Response Body + +```json +null +``` +### Fee Recipient + +Refer to [suggested fee recipient](suggested-fee-recipient.md) documentation. + +### Validator definitions example + +You can also directly configure these fields in the `validator_definitions.yml` file. 
+
+```
+---
+- enabled: true
+  voting_public_key: "0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007"
+  type: local_keystore
+  voting_keystore_path: /home/paul/.lighthouse/validators/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007/voting-keystore.json
+  voting_keystore_password_path: /home/paul/.lighthouse/secrets/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007
+  suggested_fee_recipient: "0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21"
+  gas_limit: 30000001
+  builder_proposals: true
+- enabled: false
+  voting_public_key: "0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477"
+  type: local_keystore
+  voting_keystore_path: /home/paul/.lighthouse/validators/0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477/voting-keystore.json
+  voting_keystore_password: myStrongpa55word123&$
+  suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d"
+  gas_limit: 33333333
+  builder_proposals: true
+```
+
+## Circuit breaker conditions
+
+By outsourcing payload construction and signing blocks without verifying transactions, we are creating a new risk to
+liveness. If most of the network is using a small set of relays and one is bugged, a string of missed proposals could
+happen quickly. This is not only generally bad for the network, but if you have a proposal coming up, you might not
+realize that your next proposal is likely to be missed until it's too late. So we've implemented some "chain health"
+checks to try and avoid scenarios like this.
+
+By default, Lighthouse is strict with these conditions, but we encourage users to learn about and adjust them.
+
+- `--builder-fallback-skips` - If we've seen this number of skip slots on the canonical chain in a row prior to proposing, we will NOT query
+  any connected builders, and will use the local execution engine for payload construction.
+- `--builder-fallback-skips-per-epoch` - If we've seen this number of skip slots on the canonical chain in the past `SLOTS_PER_EPOCH`, we will NOT
+  query any connected builders, and will use the local execution engine for payload construction.
+- `--builder-fallback-epochs-since-finalization` - If we're proposing and the chain has not finalized within
+  this number of epochs, we will NOT query any connected builders, and will use the local execution engine for payload
+  construction. Setting this value to anything less than 2 will cause the node to NEVER query connected builders. Setting
+  it to 2 will cause this condition to be hit if there are skip slots at the start of an epoch, right before this node
+  is set to propose.
+- `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder
+  API will always be used for payload construction, regardless of recent chain conditions.
+
+## Builder Profit Threshold
+
+If you are generally uneasy with the risks associated with outsourced payload production (liveness/censorship) but would
+consider using it for the chance of out-sized rewards, this flag may be useful:
+
+`--builder-profit-threshold <WEI_VALUE>`
+
+The number provided indicates the minimum reward that an external payload must provide the proposer for it to be considered
+for inclusion in a proposal. For example, if you'd only like to use an external payload for a reward of >= 0.25 ETH, you
+would provide your beacon node with `--builder-profit-threshold 250000000000000000`. If it's your turn to propose and the
+most valuable payload offered by builders is only 0.1 ETH, the local execution engine's payload will be used.
Currently, +this threshold just looks at the value of the external payload. No comparison to the local payload is made, although +this feature will likely be added in the future. + +[mev-rs]: https://github.com/ralexstokes/mev-rs +[mev-boost]: https://github.com/flashbots/mev-boost +[gas-limit-api]: https://ethereum.github.io/keymanager-APIs/#/Gas%20Limit diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md index fc878f5f65..736aa08f1c 100644 --- a/book/src/checkpoint-sync.md +++ b/book/src/checkpoint-sync.md @@ -41,6 +41,13 @@ Once the checkpoint is loaded Lighthouse will sync forwards to the head of the c If a validator client is connected to the node then it will be able to start completing its duties as soon as forwards sync completes. +### Use a community checkpoint sync endpoint + +The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the url for the `--checkpoint-sync-url` flag. e.g. +``` +lighthouse bn --checkpoint-sync-url https://example.com/ ... +``` + ### Use Infura as a remote beacon node provider You can use Infura as the remote beacon node provider to load the initial checkpoint state. diff --git a/book/src/contributing.md b/book/src/contributing.md index 9204ff8463..6b84843a69 100644 --- a/book/src/contributing.md +++ b/book/src/contributing.md @@ -1,6 +1,7 @@ # Contributing to Lighthouse [![Chat Badge]][Chat Link] +[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/sigp/lighthouse/badge)](https://www.gitpoap.io/gh/sigp/lighthouse) [Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da [Chat Link]: https://discord.gg/cyAszAh @@ -33,7 +34,7 @@ Lighthouse maintains two permanent branches: - [`stable`][stable]: Always points to the latest stable release. - This is ideal for most users. - [`unstable`][unstable]: Used for development, contains the latest PRs. 
- - Developers should base thier PRs on this branch. + - Developers should base their PRs on this branch. ## Ethereum consensus client diff --git a/book/src/cross-compiling.md b/book/src/cross-compiling.md index 9b458078e2..8ccf23da9d 100644 --- a/book/src/cross-compiling.md +++ b/book/src/cross-compiling.md @@ -38,3 +38,9 @@ make build-aarch64 The `lighthouse` binary will be compiled inside a Docker container and placed in `lighthouse/target/aarch64-unknown-linux-gnu/release`. + +## Feature Flags + +When using the makefile the set of features used for building can be controlled with +the environment variable `CROSS_FEATURES`. See [Feature + Flags](./installation-source.md#feature-flags) for available features. diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index ce7ff21328..c31e373b48 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -12,15 +12,31 @@ command for applying database downgrades. **Everything on this page applies to the Lighthouse _beacon node_, not to the validator client or the slasher**. +## List of schema versions + +| Lighthouse version | Release date | Schema version | Downgrade available? | +|--------------------|--------------|----------------|----------------------| +| v2.0.0 | Oct 2021 | v5 | no | +| v2.1.0 | Jan 2022 | v8 | no | +| v2.2.0 | Apr 2022 | v8 | no | +| v2.3.0 | May 2022 | v9 | yes (pre Bellatrix) | +| v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) | +| v2.5.0 | Aug 2022 | v11 | yes | +| v3.0.0 | Aug 2022 | v11 | yes | +| v3.1.0 | Sep 2022 | v12 | yes | + +> **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release +> (e.g. v2.3.0). + ## How to apply a database downgrade To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters. 1. Make sure you have a copy of the latest version of Lighthouse. 
This will be the version that knows about the latest schema change, and has the ability to revert it. -2. Work out the schema version you would like to downgrade to by checking the Lighthouse release - notes. E.g. if you want to downgrade from v2.3.0, which upgraded the version from v8 to v9, then - you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier. +2. Work out the schema version you would like to downgrade to by checking the table above, or the + Lighthouse release notes. E.g. if you want to downgrade from v2.3.0, which upgraded the version + from v8 to v9, then you'll want to _downgrade_ to v8 in order to run v2.2.x or earlier. 3. **Ensure that downgrading is feasible**. Not all schema upgrades can be reverted, and some of them are time-sensitive. The release notes will state whether a downgrade is available and whether any caveats apply to it. diff --git a/book/src/docker.md b/book/src/docker.md index 9a0378f091..f22b8a2008 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -73,7 +73,7 @@ The `stability` is: The `arch` is: * `-amd64` for x86_64, e.g. Intel, AMD -* `-arm64` for aarch64, e.g. Rasperry Pi 4 +* `-arm64` for aarch64, e.g. 
Raspberry Pi 4 * empty for a multi-arch image (works on either `amd64` or `arm64` platforms) The `modernity` is: diff --git a/book/src/faq.md b/book/src/faq.md index e14947fb05..5bfae3fa87 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -1,14 +1,14 @@ # Frequently Asked Questions - [Why does it take so long for a validator to be activated?](#why-does-it-take-so-long-for-a-validator-to-be-activated) -- [Do I need to set up any port mappings](#do-i-need-to-set-up-any-port-mappings) +- [Do I need to set up any port mappings?](#do-i-need-to-set-up-any-port-mappings) - [I have a low peer count and it is not increasing](#i-have-a-low-peer-count-and-it-is-not-increasing) - [What should I do if I lose my slashing protection database?](#what-should-i-do-if-i-lose-my-slashing-protection-database) - [How do I update lighthouse?](#how-do-i-update-lighthouse) - [I can't compile lighthouse](#i-cant-compile-lighthouse) -- [What is "Syncing eth1 block cache"](#what-is-syncing-eth1-block-cache) +- [What is "Syncing deposit contract block cache"?](#what-is-syncing-deposit-contract-block-cache) - [Can I use redundancy in my staking setup?](#can-i-use-redundancy-in-my-staking-setup) -- [How can I monitor my validators](#how-can-i-monitor-my-validators) +- [How can I monitor my validators?](#how-can-i-monitor-my-validators) ### Why does it take so long for a validator to be activated? @@ -86,7 +86,7 @@ repeats until the queue is cleared. Once a validator has been activated, there's no more waiting! It's time to produce blocks and attestations! -### Do I need to set up any port mappings +### Do I need to set up any port mappings? It is not strictly required to open any ports for Lighthouse to connect and participate in the network. Lighthouse should work out-of-the-box. 
However, if @@ -154,10 +154,10 @@ You will just also need to make sure the code you have checked out is up to date See [here.](./installation-source.md#troubleshooting) -### What is "Syncing eth1 block cache" +### What is "Syncing deposit contract block cache"? ``` -Nov 30 21:04:28.268 WARN Syncing eth1 block cache est_blocks_remaining: initializing deposits, service: slot_notifier +Nov 30 21:04:28.268 WARN Syncing deposit contract block cache est_blocks_remaining: initializing deposits, service: slot_notifier ``` This log indicates that your beacon node is downloading blocks and deposits diff --git a/book/src/graffiti.md b/book/src/graffiti.md index d657c9229c..75c2a86dd5 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -49,10 +49,12 @@ Below is an example of the validator_definitions.yml with validator specific gra ### 3. Using the "--graffiti" flag on the validator client Users can specify a common graffiti for all their validators using the `--graffiti` flag on the validator client. +Usage: `lighthouse vc --graffiti example` + ### 4. Using the "--graffiti" flag on the beacon node Users can also specify a common graffiti using the `--graffiti` flag on the beacon node as a common graffiti for all validators. -Usage: `lighthouse vc --graffiti fortytwo` +Usage: `lighthouse bn --graffiti fortytwo` > Note: The order of preference for loading the graffiti is as follows: > 1. Read from `--graffiti-file` if provided. diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md index 7a5aad32bf..2365ea7ed7 100644 --- a/book/src/installation-binaries.md +++ b/book/src/installation-binaries.md @@ -4,8 +4,6 @@ Each Lighthouse release contains several downloadable binaries in the "Assets" section of the release. You can find the [releases on Github](https://github.com/sigp/lighthouse/releases). -> Note: binaries are provided for Windows native, but Windows Lighthouse support is still in beta testing. 
- ## Platforms Binaries are supplied for four platforms: @@ -13,7 +11,7 @@ Binaries are supplied for four platforms: - `x86_64-unknown-linux-gnu`: AMD/Intel 64-bit processors (most desktops, laptops, servers) - `aarch64-unknown-linux-gnu`: 64-bit ARM processors (Raspberry Pi 4) - `x86_64-apple-darwin`: macOS with Intel chips -- `x86_64-windows`: Windows with 64-bit processors (Beta) +- `x86_64-windows`: Windows with 64-bit processors Additionally there is also a `-portable` suffix which indicates if the `portable` feature is used: diff --git a/book/src/installation-priorities.md b/book/src/installation-priorities.md index 69d871c396..0008e327b7 100644 --- a/book/src/installation-priorities.md +++ b/book/src/installation-priorities.md @@ -4,10 +4,10 @@ When publishing releases, Lighthouse will include an "Update Priority" section i The "Update Priority" section will include a table which may appear like so: -|User Class |Beacon Node | Validator Client| ---- | --- | --- -|Staking Users| Medium Priority | Low Priority | -|Non-Staking Users| Low Priority|---| +| User Class | Beacon Node | Validator Client | +|-------------------|-----------------|------------------| +| Staking Users | Medium Priority | Low Priority | +| Non-Staking Users | Low Priority | --- | To understand this table, the following terms are important: diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 4b977f5222..661035ca51 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -19,6 +19,10 @@ Install the following packages: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang ``` +> Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories +> of Ubuntu 18.04 or earlier. On these distributions CMake can still be installed via PPA: +> [https://apt.kitware.com/](https://apt.kitware.com) + #### macOS 1. Install the [Homebrew][] package manager. 
@@ -48,10 +52,9 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' choco install llvm ``` -These dependencies are for compiling Lighthouse natively on Windows, which is currently in beta -testing. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. -If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies -(Ubuntu)](#ubuntu) section. +These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run +successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you +should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about @@ -103,6 +106,23 @@ git checkout ${VERSION} make ``` +## Feature Flags + +You can customise the features that Lighthouse is built with using the `FEATURES` environment +variable. E.g. + +``` +env FEATURES="gnosis,slasher-lmdb" make +``` + +Commonly used features include: + +* `gnosis`: support for the Gnosis Beacon Chain. +* `portable`: support for legacy hardware. +* `modern`: support for exclusively modern hardware. +* `slasher-mdbx`: support for the MDBX slasher backend (enabled by default). +* `slasher-lmdb`: support for the LMDB slasher backend. + ## Troubleshooting ### Command is not found @@ -117,6 +137,10 @@ See ["Configuring the `PATH` environment variable" Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `rustup update`. +If you can't install the latest version of Rust you can instead compile using the Minimum Supported +Rust Version (MSRV) which is listed under the `rust-version` key in Lighthouse's +[Cargo.toml](https://github.com/sigp/lighthouse/blob/stable/lighthouse/Cargo.toml). + If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of memory during compilation. 
If you are on a resource-constrained device you can look into [cross compilation](./cross-compiling.md), or use a [pre-built diff --git a/book/src/installation.md b/book/src/installation.md index 38fbe6b780..bc546e0987 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -1,6 +1,6 @@ # 📦 Installation -Lighthouse runs on Linux, macOS, and Windows (still in beta testing). +Lighthouse runs on Linux, macOS, and Windows. There are three core methods to obtain the Lighthouse application: @@ -8,13 +8,15 @@ There are three core methods to obtain the Lighthouse application: - [Docker images](./docker.md). - [Building from source](./installation-source.md). -The community maintains additional installation methods (currently only one). +Community-maintained additional installation methods: - [Homebrew package](./homebrew.md). +- Arch Linux AUR packages: [source](https://aur.archlinux.org/packages/lighthouse-ethereum), + [binary](https://aur.archlinux.org/packages/lighthouse-ethereum-bin). Additionally, there are two extra guides for specific uses: -- [Rapsberry Pi 4 guide](./pi.md). +- [Raspberry Pi 4 guide](./pi.md). - [Cross-compiling guide for developers](./cross-compiling.md). ## Minimum System Requirements diff --git a/book/src/mainnet-validator.md b/book/src/mainnet-validator.md index 0f91b8e272..41735f85bb 100644 --- a/book/src/mainnet-validator.md +++ b/book/src/mainnet-validator.md @@ -34,7 +34,7 @@ Remember, if you get stuck you can always reach out on our [Discord][discord]. > > **Please note**: the Lighthouse team does not take any responsibility for losses or damages -> occured through the use of Lighthouse. We have an experienced internal security team and have +> occurred through the use of Lighthouse. We have an experienced internal security team and have > undergone multiple third-party security-reviews, however the possibility of bugs or malicious > interference remains a real and constant threat. 
Validators should be prepared to lose some rewards
> due to the actions of other actors on the consensus layer or software bugs. See the
diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md
new file mode 100644
index 0000000000..104a7ead6d
--- /dev/null
+++ b/book/src/merge-migration.md
@@ -0,0 +1,206 @@
+# Merge Migration
+
+This document provides detail for users who want to run a merge-ready Lighthouse node.
+
+> The merge is occurring on mainnet in September. You _must_ have a merge-ready setup by September 6
+> 2022.
+
+## Necessary Configuration
+
+There are two configuration changes required for a Lighthouse node to operate correctly throughout
+the merge:
+
+1. You *must* run your own execution engine such as Geth or Nethermind alongside Lighthouse.
+   You *must* update your `lighthouse bn` configuration to connect to the execution engine using new
+   flags which are documented on this page in the
+   [Connecting to an execution engine](#connecting-to-an-execution-engine) section.
+2. If your Lighthouse node has validators attached you *must* nominate an Ethereum address to
+   receive transaction tips from blocks proposed by your validators. These changes should
+   be made to your `lighthouse vc` configuration, and are covered on the
+   [Suggested fee recipient](./suggested-fee-recipient.md) page.
+
+Additionally, you _must_ update Lighthouse to v3.0.0 (or later), and must update your execution
+engine to a merge-ready version.
+
+## When?
+
+You must configure your node to be merge-ready before the Bellatrix fork occurs on the network
+on which your node is operating.
+
+* **Mainnet**: the Bellatrix fork is scheduled for epoch 144896, September 6 2022 11:34 UTC.
+  You must ensure your node configuration is updated before then in order to continue following
+  the chain. We recommend updating your configuration now.
+
+* **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has already occurred.
+ You must have a merge-ready configuration right now. + +## Connecting to an execution engine + +The Lighthouse beacon node must connect to an execution engine in order to validate the transactions +present in post-merge blocks. Two new flags are used to configure this connection: + +- `--execution-endpoint `: the URL of the execution engine API. Often this will be + `http://localhost:8551`. +- `--execution-jwt `: the path to the file containing the JWT secret shared by Lighthouse and the + execution engine. + +If you set up an execution engine with `--execution-endpoint` then you *must* provide a JWT secret +using `--execution-jwt`. This is a mandatory form of authentication that ensures that Lighthouse +has authority to control the execution engine. + +The execution engine connection must be **exclusive**, i.e. you must have one execution node +per beacon node. The reason for this is that the beacon node _controls_ the execution node. Please +see the [FAQ](#faq) for further information about why many:1 and 1:many configurations are not +supported. + +### Execution engine configuration + +Each execution engine has its own flags for configuring the engine API and JWT. Please consult +the relevant page for your execution engine for the required flags: + +- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients) +- [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) +- [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/) + +Once you have configured your execution engine to open up the engine API (usually on port 8551) you +should add the URL to your `lighthouse bn` flags with `--execution-endpoint `, as well as +the path to the JWT secret with `--execution-jwt `. + +There are merge-ready releases of all compatible execution engines available now. 
+ +### Example + +Let us look at an example of the command line arguments for a pre-merge production staking BN: + +```bash +lighthouse \ + --network mainnet \ + beacon_node \ + --http \ + --eth1-endpoints http://localhost:8545,https://mainnet.infura.io/v3/TOKEN +``` + +Converting the above to a post-merge configuration would render: + +```bash +lighthouse \ + --network mainnet \ + beacon_node \ + --http \ + --execution-endpoint http://localhost:8551 + --execution-jwt ~/.ethereum/geth/jwtsecret +``` + +The changes here are: + +1. Remove `--eth1-endpoints` + - The endpoint at `localhost` can be retained, it is our local execution engine. Once it is + upgraded to a merge-compatible release it will be used in the post-merge environment. + - The `infura.io` endpoint will be abandoned, Infura and most other third-party node providers + *are not* compatible with post-merge BNs. +2. Add the `--execution-endpoint` flag. + - We have reused the node at `localhost`, however we've switched to the authenticated engine API + port `8551`. All execution engines will have a specific port for this API, however it might + not be `8551`, see their documentation for details. +3. Add the `--execution-jwt` flag. + - This is the path to a file containing a 32-byte secret for authenticating the BN with the + execution engine. In this example our execution engine is Geth, so we've chosen the default + location for Geth. Your execution engine might have a different path. It is critical that both + the BN and execution engine reference a file with the same value, otherwise they'll fail to + communicate. + +Note that the `--network` and `--http` flags haven't changed. The only changes required for the +merge are ensuring that `--execution-endpoint` and `--execution-jwt` flags are provided! In fact, +you can even leave the `--eth1-endpoints` flag there, it will be ignored. This is not recommended as +a deprecation warning will be logged and Lighthouse *may* remove these flags in the future. 
+ +### The relationship between `--eth1-endpoints` and `--execution-endpoint` + +Pre-merge users will be familiar with the `--eth1-endpoints` flag. This provides a list of Ethereum +"eth1" nodes (e.g., Geth, Nethermind, etc). Each beacon node (BN) can have multiple eth1 endpoints +and each eth1 endpoint can have many BNs connection (many-to-many relationship). The eth1 node +provides a source of truth for the [deposit +contract](https://ethereum.org/en/staking/deposit-contract/) and beacon chain proposers include this +information in beacon blocks in order to on-board new validators. BNs exclusively use the `eth` +namespace on the eth1 [JSON-RPC API](https://ethereum.org/en/developers/docs/apis/json-rpc/) to +achieve this. + +To progress through the Bellatrix upgrade nodes will need a *new* connection to an "eth1" node; +`--execution-endpoint`. This connection has a few different properties. Firstly, the term "eth1 +node" has been deprecated and replaced with "execution engine". Whilst "eth1 node" and "execution +engine" still refer to the same projects (Geth, Nethermind, etc) the former refers to the pre-merge +versions and the latter refers to post-merge versions. Secondly, there is a strict one-to-one +relationship between Lighthouse and the execution engine; only one Lighthouse node can connect to +one execution engine. Thirdly, it is impossible to fully verify the post-merge chain without an +execution engine. It *was* possible to verify the pre-merge chain without an eth1 node, it was just +impossible to reliably *propose* blocks without it. + +Since an execution engine is a hard requirement in the post-merge chain and the execution engine +contains the transaction history of the Ethereum chain, there is no longer a need for the +`--eth1-endpoints` flag for information about the deposit contract. The `--execution-endpoint` can +be used for all such queries. 
Therefore we can say that where `--execution-endpoint` is included +`--eth1-endpoints` should be omitted. + +## FAQ + +### How do I know if my node is set up correctly? + +Lighthouse will log a message indicating that it is ready for the merge: + +``` +INFO Ready for the merge, current_difficulty: 10789363, terminal_total_difficulty: 10790000 +``` + +Once the merge has occurred you should see that Lighthouse remains in sync and marks blocks +as `verified` indicating that they have been processed successfully by the execution engine: + +``` +INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78 +``` + +### Can I still use the `--staking` flag? + +Yes. The `--staking` flag is just an alias for `--http --eth1`. The `--eth1` flag is now superfluous +so `--staking` is equivalent to `--http`. You need either `--staking` or `--http` for the validator +client to be able to connect to the beacon node. + +### Can I use `http://localhost:8545` for the execution endpoint? + +Most execution nodes use port `8545` for the Ethereum JSON-RPC API. Unless custom configuration is +used, an execution node _will not_ provide the necessary engine API on port `8545`. You should +not attempt to use `http://localhost:8545` as your engine URL and should instead use +`http://localhost:8551`. + +### Can I share an execution node between multiple beacon nodes (many:1)? + +It is **not** possible to connect more than one beacon node to the same execution engine. There must be a 1:1 relationship between beacon nodes and execution nodes. + +The beacon node controls the execution node via the engine API, telling it which block is the +current head of the chain. If multiple beacon nodes were to connect to a single execution node they +could set conflicting head blocks, leading to frequent re-orgs on the execution node. 
+ +We imagine that in future there will be HTTP proxies available which allow users to nominate a +single controlling beacon node, while allowing consistent updates from other beacon nodes. + +### What about multiple execution endpoints (1:many)? + +It is **not** possible to connect one beacon node to more than one execution engine. There must be a 1:1 relationship between beacon nodes and execution nodes. + +Since an execution engine can only have one controlling BN, the value of having multiple execution +engines connected to the same BN is very low. An execution engine cannot be shared between BNs to +reduce costs. + +Whilst having multiple execution engines connected to a single BN might be useful for advanced +testing scenarios, Lighthouse (and other consensus clients) have decided to support *only one* +execution endpoint. Such scenarios could be resolved with a custom-made HTTP proxy. + +## Additional Resources + +There are several community-maintained guides which provide more background information, as well as +guidance for specific setups. + +- [Ethereum.org: The Merge](https://ethereum.org/en/upgrades/merge/) +- [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness). +- [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators) +- [EthDocker: Merge Preparation](https://eth-docker.net/docs/About/MergePrep/) +- [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md) diff --git a/book/src/redundancy.md b/book/src/redundancy.md index 3409effb36..dae7ac51fe 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -4,8 +4,8 @@ There are three places in Lighthouse where redundancy is notable: -1. ✅ GOOD: Using a redundant Beacon node in `lighthouse vc --beacon-nodes` -1. 
✅ GOOD: Using a redundant execution node in `lighthouse bn --eth1-endpoints` +1. ✅ GOOD: Using a redundant beacon node in `lighthouse vc --beacon-nodes` +1. ❌ NOT SUPPORTED: Using a redundant execution node in `lighthouse bn --execution-endpoint` 1. ☠️ BAD: Running redundant `lighthouse vc` instances with overlapping keypairs. I mention (3) since it is unsafe and should not be confused with the other two @@ -42,7 +42,7 @@ There are a few interesting properties about the list of `--beacon-nodes`: - *Synced is preferred*: the validator client prefers a synced beacon node over one that is still syncing. - *Failure is sticky*: if a beacon node fails, it will be flagged as offline - and wont be retried again for the rest of the slot (12 seconds). This helps prevent the impact + and won't be retried again for the rest of the slot (12 seconds). This helps prevent the impact of time-outs and other lengthy errors. > Note: When supplying multiple beacon nodes the `http://localhost:5052` address must be explicitly @@ -51,7 +51,7 @@ There are a few interesting properties about the list of `--beacon-nodes`: ### Configuring a redundant Beacon Node -In our previous example we listed `http://192.168.1.1:5052` as a redundant +In our previous example, we listed `http://192.168.1.1:5052` as a redundant node. Apart from having sufficient resources, the backup node should have the following flags: @@ -94,23 +94,10 @@ resource consumption akin to running 64+ validators. ## Redundant execution nodes -Compared to redundancy in beacon nodes (see above), using redundant execution nodes -is very straight-forward: +Lighthouse previously supported redundant execution nodes for fetching data from the deposit +contract. On merged networks _this is no longer supported_. Each Lighthouse beacon node must be +configured in a 1:1 relationship with an execution node. For more information on the rationale +behind this decision please see the [Merge Migration](./merge-migration.md) documentation. -1. 
`lighthouse bn --eth1-endpoints http://localhost:8545` -1. `lighthouse bn --eth1-endpoints http://localhost:8545,http://192.168.0.1:8545` - -In the case of (1), any failure on `http://localhost:8545` will result in a -failure to update the execution client cache in the beacon node. Consistent failure over a -period of hours may result in a failure in block production. - -However, in the case of (2), the `http://192.168.0.1:8545` execution client endpoint will -be tried each time the first fails. Execution client endpoints will be tried from first to -last in the list, until a successful response is obtained. - -There is no need for special configuration on the execution client endpoint, all endpoints can (probably should) -be configured identically. - -> Note: When supplying multiple endpoints the `http://localhost:8545` address must be explicitly -> provided (if it is desired). It will only be used as default if no `--eth1-endpoints` flag is -> provided at all. +To achieve redundancy we recommend configuring [Redundant beacon nodes](#redundant-beacon-nodes) +where each has its own execution engine. diff --git a/book/src/setup.md b/book/src/setup.md index dfff9290e6..e8c56623be 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -19,7 +19,7 @@ The additional requirements for developers are: ## Using `make` -Commands to run the test suite are avaiable via the `Makefile` in the +Commands to run the test suite are available via the `Makefile` in the project root for the benefit of CI/CD. 
We list some of these commands below so you can run them locally and avoid CI failures: diff --git a/book/src/slasher.md b/book/src/slasher.md index 05107238c3..61dc4b327f 100644 --- a/book/src/slasher.md +++ b/book/src/slasher.md @@ -1,6 +1,6 @@ # Running a Slasher -Lighthouse includes a slasher for identifying slashable offences comitted by other validators and +Lighthouse includes a slasher for identifying slashable offences committed by other validators and including proof of those offences in blocks. Running a slasher is a good way to contribute to the health of the network, and doing so can earn @@ -43,6 +43,34 @@ By default the slasher stores data in the `slasher_db` directory inside the beac e.g. `~/.lighthouse/{network}/beacon/slasher_db`. You can use this flag to change that storage directory. +### Database Backend + +* Flag: `--slasher-backend NAME` +* Argument: one of `mdbx`, `lmdb` or `disabled` +* Default: `mdbx` + +Since Lighthouse v2.6.0 it is possible to use one of several database backends with the slasher: + +- MDBX (default) +- LMDB + +The advantage of MDBX is that it performs compaction, resulting in less disk usage over time. The +disadvantage is that upstream MDBX has removed support for Windows and macOS, so Lighthouse is stuck +on an older version. If bugs are found in our pinned version of MDBX it may be deprecated in future. + +LMDB does not have compaction but is more stable upstream than MDBX. It is not currently recommended +to use the LMDB backend on Windows. + +More backends may be added in future. + +### Switching Backends + +If you change database backends and want to reclaim the space used by the old backend you can +delete the following files from your `slasher_db` directory: + +* removing MDBX: delete `mdbx.dat` and `mdbx.lck` +* removing LMDB: delete `data.mdb` and `lock.mdb` + ### History Length * Flag: `--slasher-history-length EPOCHS` @@ -65,11 +93,11 @@ changed after initialization. 
* Argument: maximum size of the database in gigabytes * Default: 256 GB -The slasher uses MDBX as its backing store, which places a hard limit on the size of the database +Both database backends LMDB and MDBX place a hard limit on the size of the database file. You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after initialization if the limit is reached. -By default the limit is set to accomodate the default history length and around 300K validators but +By default the limit is set to accommodate the default history length and around 300K validators but you can set it lower if running with a reduced history length. The space required scales approximately linearly in validator count and history length, i.e. if you halve either you can halve the space required. @@ -85,10 +113,6 @@ where `V` is the validator count and `N` is the history length. You should set the maximum size higher than the estimate to allow room for growth in the validator count. -> NOTE: In Lighthouse v2.1.0 the slasher database was switched from LMDB to MDBX. Unlike LMDB, MDBX -> does garbage collection of free pages and is capable of shrinking the database file and preventing -> it from growing indefinitely. - ### Update Period * Flag: `--slasher-update-period SECONDS` @@ -134,7 +158,7 @@ the slot duration. ### Chunk Size and Validator Chunk Size * Flags: `--slasher-chunk-size EPOCHS`, `--slasher-validator-chunk-size NUM_VALIDATORS` -* Arguments: number of ecochs, number of validators +* Arguments: number of epochs, number of validators * Defaults: 16, 256 Adjusting these parameter should only be done in conjunction with reading in detail diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md index 9ae6c102e3..a60c8e36dc 100644 --- a/book/src/slashing-protection.md +++ b/book/src/slashing-protection.md @@ -54,7 +54,7 @@ Examples where it is **ineffective** are: clients (e.g. 
Lighthouse and Prysm) running on the same machine, two Lighthouse instances using different datadirs, or two clients on completely different machines (e.g. one on a cloud server and one running locally). You are responsible for ensuring that your validator keys are never - running simultanously – the slashing protection DB **cannot protect you in this case**. + running simultaneously – the slashing protection DB **cannot protect you in this case**. * Importing keys from another client without also importing voting history. * If you use `--init-slashing-protection` to recreate a missing slashing protection database. diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index 3ff71ec7d6..c1739aa937 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -1,8 +1,10 @@ # Suggested Fee Recipient -*Note: these documents are not relevant until the Bellatrix (Merge) upgrade has occurred.* +The _fee recipient_ is an Ethereum address nominated by a beacon chain validator to receive +tips from user transactions. If you run validators on a network that has already merged +or is due to merge soon then you should nominate a fee recipient for your validators. -## Fee recipient trust assumptions +## Background During post-merge block production, the Beacon Node (BN) will provide a `suggested_fee_recipient` to the execution node. This is a 20-byte Ethereum address which the EL might choose to set as the @@ -10,29 +12,30 @@ coinbase and the recipient of other fees or rewards. There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees, it may use any address it chooses. It is assumed that an honest execution node *will* use the -`suggested_fee_recipient`, but users should note this trust assumption. +`suggested_fee_recipient`, but users should note this trust assumption. -The `suggested_fee_recipient` can be provided to the VC, who will transmit it to the BN. 
The BN also +The `suggested_fee_recipient` can be provided to the VC, which will transmit it to the BN. The BN also has a choice regarding the fee recipient it passes to the execution node, creating another noteworthy trust assumption. To be sure *you* control your fee recipient value, run your own BN and execution node (don't use third-party services). -The Lighthouse VC provides three methods for setting the `suggested_fee_recipient` (also known +## How to configure a suggested fee recipient + +The Lighthouse VC provides two methods for setting the `suggested_fee_recipient` (also known simply as the "fee recipient") to be passed to the execution layer during block production. The Lighthouse BN also provides a method for defining this value, should the VC not transmit a value. -Assuming trustworthy nodes, the priority for the four methods is: +Assuming trustworthy nodes, the priority for the three methods is: 1. `validator_definitions.yml` -1. `--suggested-fee-recipient-file` 1. `--suggested-fee-recipient` provided to the VC. 1. `--suggested-fee-recipient` provided to the BN. -Users may configure the fee recipient via `validator_definitions.yml` or via the -`--suggested-fee-recipient-file` flag. The value in `validator_definitions.yml` will always take -precedence. +> **NOTE**: It is **not** recommended to _only_ set the fee recipient on the beacon node, as this results +> in sub-optimal block proposals. See [this issue](https://github.com/sigp/lighthouse/issues/3432) +> for details. ### 1. Setting the fee recipient in the `validator_definitions.yml` @@ -56,36 +59,134 @@ Below is an example of the validator_definitions.yml with `suggested_fee_recipie suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" ``` -### 2. Using the "--suggested-fee-recipient-file" flag on the validator client - -Users can specify a file with the `--suggested-fee-recipient-file` flag. This option is useful for dynamically -changing fee recipients. 
This file is reloaded each time a validator is chosen to propose a block. - -Usage: -`lighthouse vc --suggested-fee-recipient-file fee_recipient.txt` - -The file should contain key value pairs corresponding to validator public keys and their associated -fee recipient. The file can optionally contain a `default` key for the default case. - -The following example sets the default and the values for the validators with pubkeys `0x87a5` and -`0xa556`: - -``` -default: 0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21 -0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007: 0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21 -0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477: 0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d -``` - -Lighthouse will first search for the fee recipient corresponding to the public key of the proposing -validator, if there are no matches for the public key, then it uses the address corresponding to the -default key (if present). - -### 3. Using the "--suggested-fee-recipient" flag on the validator client +### 2. Using the "--suggested-fee-recipient" flag on the validator client The `--suggested-fee-recipient` can be provided to the VC to act as a default value for all validators where a `suggested_fee_recipient` is not loaded from another method. -### 4. Using the "--suggested-fee-recipient" flag on the beacon node +Provide a 0x-prefixed address, e.g. + +``` +lighthouse vc --suggested-fee-recipient 0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b ... +``` + + +### 3. Using the "--suggested-fee-recipient" flag on the beacon node The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the validator client does not transmit a `suggested_fee_recipient` to the BN. + +``` +lighthouse bn --suggested-fee-recipient 0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b ... +``` + +**This value should be considered an emergency fallback**. 
You should set the fee recipient in the +validator client in order for the execution node to be given adequate notice of block proposal. + +## Setting the fee recipient dynamically using the keymanager API + +When the [validator client API](api-vc.md) is enabled, the +[standard keymanager API](https://ethereum.github.io/keymanager-APIs/) includes an endpoint +for setting the fee recipient dynamically for a given public key. When used, the fee recipient +will be saved in `validator_definitions.yml` so that it persists across restarts of the validator +client. + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/eth/v1/validator/{pubkey}/feerecipient` | +| Method | POST | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 202, 404 | + +#### Example Request Body +```json +{ + "ethaddress": "0x1D4E51167DBDC4789a014357f4029ff76381b16c" +} +``` + +```bash +DATADIR=$HOME/.lighthouse/mainnet +PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 +FEE_RECIPIENT=0x1D4E51167DBDC4789a014357f4029ff76381b16c + +curl -X POST \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + -d "{ \"ethaddress\": \"${FEE_RECIPIENT}\" }" \ + http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq +``` + +#### Successful Response (202) +```json +null +``` + +### Querying the fee recipient + +The same path with a `GET` request can be used to query the fee recipient for a given public key at any time. 
+ +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/eth/v1/validator/{pubkey}/feerecipient` | +| Method | GET | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 200, 404 | + +```bash +DATADIR=$HOME/.lighthouse/mainnet +PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 + +curl -X GET \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq +``` + +#### Successful Response (200) +```json +{ + "data": { + "pubkey": "0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591", + "ethaddress": "0x1d4e51167dbdc4789a014357f4029ff76381b16c" + } +} +``` + +### Removing the fee recipient + +The same path with a `DELETE` request can be used to remove the fee recipient for a given public key at any time. +This is useful if you want the fee recipient to fall back to the validator client (or beacon node) default. + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/eth/v1/validator/{pubkey}/feerecipient` | +| Method | DELETE | +| Required Headers | [`Authorization`](./api-vc-auth-header.md) | +| Typical Responses | 204, 404 | + +```bash +DATADIR=$HOME/.lighthouse/mainnet +PUBKEY=0xa9735061c84fc0003657e5bd38160762b7ef2d67d280e00347b1781570088c32c06f15418c144949f5d736b1d3a6c591 + +curl -X DELETE \ + -H "Authorization: Bearer $(cat ${DATADIR}/validators/api-token.txt)" \ + -H "Content-Type: application/json" \ + http://localhost:5062/eth/v1/validator/${PUBKEY}/feerecipient | jq +``` + +#### Successful Response (204) +```json +null +``` + +## FAQ + +### Why do I have to nominate an Ethereum address as the fee recipient? 
+ +You might wonder why the validator can't just accumulate transactions fees in the same way that it +accumulates other staking rewards. The reason for this is that transaction fees are computed and +validated by the execution node, and therefore need to be paid to an address that exists on the +execution chain. Validators use BLS keys which do not correspond to Ethereum addresses, so they +have no "presence" on the execution chain. Therefore, it's necessary for each validator to nominate +a separate fee recipient address. diff --git a/book/src/validator-import-launchpad.md b/book/src/validator-import-launchpad.md index aee9ac7b96..9849b91b70 100644 --- a/book/src/validator-import-launchpad.md +++ b/book/src/validator-import-launchpad.md @@ -1,6 +1,6 @@ # Importing from the Ethereum Staking Launch pad -The [Staking Lauchpad](https://github.com/ethereum/eth2.0-deposit) is a website +The [Staking Launchpad](https://github.com/ethereum/eth2.0-deposit) is a website from the Ethereum Foundation which guides users how to use the [`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) command-line program to generate consensus validator keys. diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index edf80e7308..9074bc0273 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -4,6 +4,9 @@ Lighthouse allows for fine-grained monitoring of specific validators using the " Generally users will want to use this function to track their own validators, however, it can be used for any validator, regardless of who controls it. +_Note: If you are looking for remote metric monitoring, please see the docs on +[Prometheus Metrics](./advanced_metrics.md)_. 
+ ## Monitoring is in the Beacon Node Lighthouse performs validator monitoring in the Beacon Node (BN) instead of the Validator Client diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index d3a28102f6..4c44eaa602 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "2.2.1" +version = "3.1.0" authors = ["Sigma Prime "] edition = "2021" diff --git a/boot_node/src/lib.rs b/boot_node/src/lib.rs index f4391f987a..3d9dada0fd 100644 --- a/boot_node/src/lib.rs +++ b/boot_node/src/lib.rs @@ -50,7 +50,7 @@ pub fn run( let logger = Logger::root(drain.fuse(), o!()); let _scope_guard = slog_scope::set_global_logger(logger); - let _log_guard = slog_stdlog::init_with_level(debug_level).unwrap(); + slog_stdlog::init_with_level(debug_level).unwrap(); let log = slog_scope::logger(); // Run the main function emitting any errors diff --git a/bors.toml b/bors.toml index d7d1e98762..0ff5d6231b 100644 --- a/bors.toml +++ b/bors.toml @@ -7,6 +7,7 @@ status = [ "ef-tests-ubuntu", "dockerfile-ubuntu", "eth1-simulator-ubuntu", + "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", "check-benchmarks", "check-consensus", diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 3f4831ae17..66e3b73547 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -45,6 +45,29 @@ pub enum Error { UnableToCreateValidatorDir(PathBuf), } +#[derive(Clone, PartialEq, Serialize, Deserialize, Hash, Eq)] +pub struct Web3SignerDefinition { + pub url: String, + /// Path to a .pem file. + #[serde(skip_serializing_if = "Option::is_none")] + pub root_certificate_path: Option, + /// Specifies a request timeout. + /// + /// The timeout is applied from when the request starts connecting until the response body has finished. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub request_timeout_ms: Option, + + /// Path to a PKCS12 file. + #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_path: Option, + + /// Password for the PKCS12 file. + /// + /// An empty password will be used if this is omitted. + #[serde(skip_serializing_if = "Option::is_none")] + pub client_identity_password: Option, +} + /// Defines how the validator client should attempt to sign messages for this validator. #[derive(Clone, PartialEq, Serialize, Deserialize)] #[serde(tag = "type")] @@ -62,27 +85,7 @@ pub enum SigningDefinition { /// /// https://github.com/ConsenSys/web3signer #[serde(rename = "web3signer")] - Web3Signer { - url: String, - /// Path to a .pem file. - #[serde(skip_serializing_if = "Option::is_none")] - root_certificate_path: Option, - /// Specifies a request timeout. - /// - /// The timeout is applied from when the request starts connecting until the response body has finished. - #[serde(skip_serializing_if = "Option::is_none")] - request_timeout_ms: Option, - - /// Path to a PKCS12 file. - #[serde(skip_serializing_if = "Option::is_none")] - client_identity_path: Option, - - /// Password for the PKCS12 file. - /// - /// An empty password will be used if this is omitted. - #[serde(skip_serializing_if = "Option::is_none")] - client_identity_password: Option, - }, + Web3Signer(Web3SignerDefinition), } impl SigningDefinition { @@ -106,6 +109,12 @@ pub struct ValidatorDefinition { #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, + #[serde(default)] pub description: String, #[serde(flatten)] pub signing_definition: SigningDefinition, @@ -123,6 +132,8 @@ impl ValidatorDefinition { voting_keystore_password: Option, graffiti: Option, suggested_fee_recipient: Option
, + gas_limit: Option, + builder_proposals: Option, ) -> Result { let voting_keystore_path = voting_keystore_path.as_ref().into(); let keystore = @@ -135,6 +146,8 @@ impl ValidatorDefinition { description: keystore.description().unwrap_or("").to_string(), graffiti, suggested_fee_recipient, + gas_limit, + builder_proposals, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, @@ -281,6 +294,8 @@ impl ValidatorDefinitions { description: keystore.description().unwrap_or("").to_string(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path, @@ -523,4 +538,84 @@ mod tests { Some(Address::from_str("0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d").unwrap()) ); } + + #[test] + fn gas_limit_checks() { + let no_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + let def: ValidatorDefinition = serde_yaml::from_str(no_gas_limit).unwrap(); + assert!(def.gas_limit.is_none()); + + let invalid_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: "banana" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: Result = serde_yaml::from_str(invalid_gas_limit); + assert!(def.is_err()); + + let valid_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: 35000000 + voting_keystore_path: "" + voting_public_key: 
"0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: ValidatorDefinition = serde_yaml::from_str(valid_gas_limit).unwrap(); + assert_eq!(def.gas_limit, Some(35000000)); + } + + #[test] + fn builder_proposals_checks() { + let no_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + let def: ValidatorDefinition = serde_yaml::from_str(no_builder_proposals).unwrap(); + assert!(def.builder_proposals.is_none()); + + let invalid_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + builder_proposals: "banana" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: Result = serde_yaml::from_str(invalid_builder_proposals); + assert!(def.is_err()); + + let valid_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + builder_proposals: true + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: ValidatorDefinition = serde_yaml::from_str(valid_builder_proposals).unwrap(); + assert_eq!(def.builder_proposals, Some(true)); + } } diff --git a/common/deposit_contract/build.rs b/common/deposit_contract/build.rs index ac05a53e31..cae1d480c8 100644 --- a/common/deposit_contract/build.rs +++ b/common/deposit_contract/build.rs @@ -54,12 +54,10 @@ fn read_contract_file_from_url(url: Url) -> Result { .map_err(|e| format!("Respsonse is not a valid 
json {:?}", e))?; Ok(contract) } - Err(e) => { - return Err(format!( - "No abi file found. Failed to download from github: {:?}", - e - )) - } + Err(e) => Err(format!( + "No abi file found. Failed to download from github: {:?}", + e + )), } } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 3e965a2bf8..f096aca97e 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -23,7 +23,7 @@ use lighthouse_network::PeerId; pub use reqwest; use reqwest::{IntoUrl, RequestBuilder, Response}; pub use reqwest::{StatusCode, Url}; -use sensitive_url::SensitiveUrl; +pub use sensitive_url::SensitiveUrl; use serde::{de::DeserializeOwned, Serialize}; use std::convert::TryFrom; use std::fmt; @@ -110,7 +110,10 @@ pub struct Timeouts { pub liveness: Duration, pub proposal: Duration, pub proposer_duties: Duration, + pub sync_committee_contribution: Duration, pub sync_duties: Duration, + pub get_beacon_blocks_ssz: Duration, + pub get_debug_beacon_states: Duration, } impl Timeouts { @@ -121,7 +124,10 @@ impl Timeouts { liveness: timeout, proposal: timeout, proposer_duties: timeout, + sync_committee_contribution: timeout, sync_duties: timeout, + get_beacon_blocks_ssz: timeout, + get_debug_beacon_states: timeout, } } } @@ -237,9 +243,10 @@ impl BeaconNodeHttpClient { &self, url: U, accept_header: Accept, + timeout: Duration, ) -> Result>, Error> { let opt_response = self - .get_response(url, |b| b.accept(accept_header)) + .get_response(url, |b| b.accept(accept_header).timeout(timeout)) .await .optional()?; match opt_response { @@ -330,7 +337,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_root( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -349,7 +356,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_fork( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() 
@@ -368,7 +375,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_finality_checkpoints( &self, state_id: StateId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -388,7 +395,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, ids: Option<&[ValidatorId]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -418,7 +425,7 @@ impl BeaconNodeHttpClient { state_id: StateId, ids: Option<&[ValidatorId]>, statuses: Option<&[ValidatorStatus]>, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -458,7 +465,7 @@ impl BeaconNodeHttpClient { slot: Option, index: Option, epoch: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -491,7 +498,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, epoch: Option, - ) -> Result, Error> { + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -516,7 +523,7 @@ impl BeaconNodeHttpClient { &self, state_id: StateId, validator_id: &ValidatorId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -537,7 +544,7 @@ impl BeaconNodeHttpClient { &self, slot: Option, parent_root: Option, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -564,7 +571,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_headers_block_id( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -633,7 +640,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks( &self, block_id: BlockId, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let path = self.get_beacon_blocks_path(block_id)?; let response = match self.get_response(path, |b| 
b).await.optional()? { Some(res) => res, @@ -642,20 +649,31 @@ impl BeaconNodeHttpClient { // If present, use the fork provided in the headers to decode the block. Gracefully handle // missing and malformed fork names by falling back to regular deserialisation. - let (block, version) = match response.fork_name_from_header() { + let (block, version, execution_optimistic) = match response.fork_name_from_header() { Ok(Some(fork_name)) => { - map_fork_name_with!(fork_name, SignedBeaconBlock, { - let ForkVersionedResponse { version, data } = response.json().await?; - (data, version) - }) + let (data, (version, execution_optimistic)) = + map_fork_name_with!(fork_name, SignedBeaconBlock, { + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, (version, execution_optimistic)) + }); + (data, version, execution_optimistic) } Ok(None) | Err(_) => { - let ForkVersionedResponse { version, data } = response.json().await?; - (data, version) + let ExecutionOptimisticForkVersionedResponse { + version, + execution_optimistic, + data, + } = response.json().await?; + (data, version, execution_optimistic) } }; - Ok(Some(ForkVersionedResponse { + Ok(Some(ExecutionOptimisticForkVersionedResponse { version, + execution_optimistic, data: block, })) } @@ -688,7 +706,7 @@ impl BeaconNodeHttpClient { ) -> Result>, Error> { let path = self.get_beacon_blocks_path(block_id)?; - self.get_bytes_opt_accept_header(path, Accept::Ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_beacon_blocks_ssz) .await? 
.map(|bytes| SignedBeaconBlock::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)) .transpose() @@ -700,7 +718,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_root( &self, block_id: BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -719,7 +737,7 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_blocks_attestations( &self, block_id: BlockId, - ) -> Result>>>, Error> { + ) -> Result>>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -907,7 +925,12 @@ impl BeaconNodeHttpClient { .push("validator") .push("contribution_and_proofs"); - self.post(path, &signed_contributions).await?; + self.post_with_timeout( + path, + &signed_contributions, + self.timeouts.sync_committee_contribution, + ) + .await?; Ok(()) } @@ -929,6 +952,23 @@ impl BeaconNodeHttpClient { Ok(()) } + /// `POST validator/register_validator` + pub async fn post_validator_register_validator( + &self, + registration_data: &[SignedValidatorRegistrationData], + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("register_validator"); + + self.post(path, ®istration_data).await?; + + Ok(()) + } + /// `GET config/fork_schedule` pub async fn get_config_fork_schedule(&self) -> Result>, Error> { let mut path = self.eth_path(V1)?; @@ -942,7 +982,9 @@ impl BeaconNodeHttpClient { } /// `GET config/spec` - pub async fn get_config_spec(&self) -> Result, Error> { + pub async fn get_config_spec( + &self, + ) -> Result, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1099,7 +1141,7 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let path = self.get_debug_beacon_states_path(state_id)?; self.get_opt(path).await } @@ -1108,7 +1150,7 @@ impl BeaconNodeHttpClient { pub async fn get_debug_beacon_states_v1( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1130,15 +1172,30 @@ impl BeaconNodeHttpClient { ) -> Result>, Error> { let path = self.get_debug_beacon_states_path(state_id)?; - self.get_bytes_opt_accept_header(path, Accept::Ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_debug_beacon_states) .await? .map(|bytes| BeaconState::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)) .transpose() } - /// `GET debug/beacon/heads` + /// `GET v2/debug/beacon/heads` pub async fn get_debug_beacon_heads( &self, + ) -> Result>, Error> { + let mut path = self.eth_path(V2)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("debug") + .push("beacon") + .push("heads"); + + self.get(path).await + } + + /// `GET v1/debug/beacon/heads` (LEGACY) + pub async fn get_debug_beacon_heads_v1( + &self, ) -> Result>, Error> { let mut path = self.eth_path(V1)?; @@ -1230,7 +1287,7 @@ impl BeaconNodeHttpClient { .await } - /// `GET v2/validator/blocks/{slot}` + /// `GET v1/validator/blinded_blocks/{slot}` pub async fn get_validator_blinded_blocks_with_verify_randao< T: EthSpec, Payload: ExecPayload, @@ -1241,7 +1298,7 @@ impl BeaconNodeHttpClient { graffiti: Option<&Graffiti>, verify_randao: Option, ) -> Result>, Error> { - let mut path = self.eth_path(V2)?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -1470,7 +1527,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, indices: &[u64], - ) -> Result>, Error> { + ) -> Result>, Error> { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -1491,7 +1548,7 @@ impl BeaconNodeHttpClient { /// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an /// appropriate error message. -async fn ok_or_error(response: Response) -> Result { +pub async fn ok_or_error(response: Response) -> Result { let status = response.status(); if status == StatusCode::OK { diff --git a/common/eth2/src/lighthouse/block_rewards.rs b/common/eth2/src/lighthouse/block_rewards.rs index 186cbd888c..38070f3539 100644 --- a/common/eth2/src/lighthouse/block_rewards.rs +++ b/common/eth2/src/lighthouse/block_rewards.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; -use types::{Hash256, Slot}; +use types::{AttestationData, Hash256, Slot}; /// Details about the rewards paid to a block proposer for proposing a block. /// @@ -42,6 +42,9 @@ pub struct AttestationRewards { /// /// Each element of the vec is a map from validator index to reward. pub per_attestation_rewards: Vec>, + /// The attestations themselves (optional). 
+ #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub attestations: Vec, } /// Query parameters for the `/lighthouse/block_rewards` endpoint. @@ -51,4 +54,7 @@ pub struct BlockRewardsQuery { pub start_slot: Slot, /// Upper slot limit for block rewards returned (inclusive). pub end_slot: Slot, + /// Include the full attestations themselves? + #[serde(default)] + pub include_attestations: bool, } diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 5e02ec0bb2..88b5b68401 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -303,11 +303,11 @@ impl ValidatorClientHttpClient { } /// Perform a HTTP DELETE request. - async fn delete_with_unsigned_response( + async fn delete_with_raw_response( &self, url: U, body: &T, - ) -> Result { + ) -> Result { let response = self .client .delete(url) @@ -316,7 +316,16 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::Reqwest)?; - let response = ok_or_error(response).await?; + ok_or_error(response).await + } + + /// Perform a HTTP DELETE request. + async fn delete_with_unsigned_response( + &self, + url: U, + body: &T, + ) -> Result { + let response = self.delete_with_raw_response(url, body).await?; Ok(response.json().await?) 
} @@ -345,7 +354,9 @@ impl ValidatorClientHttpClient { } /// `GET lighthouse/spec` - pub async fn get_lighthouse_spec(&self) -> Result, Error> { + pub async fn get_lighthouse_spec( + &self, + ) -> Result, Error> { let mut path = self.server.full.clone(); path.path_segments_mut() @@ -453,7 +464,9 @@ impl ValidatorClientHttpClient { pub async fn patch_lighthouse_validators( &self, voting_pubkey: &PublicKeyBytes, - enabled: bool, + enabled: Option, + gas_limit: Option, + builder_proposals: Option, ) -> Result<(), Error> { let mut path = self.server.full.clone(); @@ -463,7 +476,15 @@ impl ValidatorClientHttpClient { .push("validators") .push(&voting_pubkey.to_string()); - self.patch(path, &ValidatorPatchRequest { enabled }).await + self.patch( + path, + &ValidatorPatchRequest { + enabled, + gas_limit, + builder_proposals, + }, + ) + .await } fn make_keystores_url(&self) -> Result { @@ -486,6 +507,30 @@ impl ValidatorClientHttpClient { Ok(url) } + fn make_fee_recipient_url(&self, pubkey: &PublicKeyBytes) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("feerecipient"); + Ok(url) + } + + fn make_gas_limit_url(&self, pubkey: &PublicKeyBytes) -> Result { + let mut url = self.server.full.clone(); + url.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("eth") + .push("v1") + .push("validator") + .push(&pubkey.to_string()) + .push("gas_limit"); + Ok(url) + } + /// `GET lighthouse/auth` pub async fn get_auth(&self) -> Result { let mut url = self.server.full.clone(); @@ -543,14 +588,71 @@ impl ValidatorClientHttpClient { let url = self.make_remotekeys_url()?; self.delete_with_unsigned_response(url, req).await } + + /// `GET /eth/v1/validator/{pubkey}/feerecipient` + pub async fn get_fee_recipient( + &self, + pubkey: &PublicKeyBytes, + ) -> Result { + let url = self.make_fee_recipient_url(pubkey)?; + self.get(url) + .await + .map(|generic: GenericResponse| generic.data) + } + + /// `POST /eth/v1/validator/{pubkey}/feerecipient` + pub async fn post_fee_recipient( + &self, + pubkey: &PublicKeyBytes, + req: &UpdateFeeRecipientRequest, + ) -> Result { + let url = self.make_fee_recipient_url(pubkey)?; + self.post_with_raw_response(url, req).await + } + + /// `DELETE /eth/v1/validator/{pubkey}/feerecipient` + pub async fn delete_fee_recipient(&self, pubkey: &PublicKeyBytes) -> Result { + let url = self.make_fee_recipient_url(pubkey)?; + self.delete_with_raw_response(url, &()).await + } + + /// `GET /eth/v1/validator/{pubkey}/gas_limit` + pub async fn get_gas_limit( + &self, + pubkey: &PublicKeyBytes, + ) -> Result { + let url = self.make_gas_limit_url(pubkey)?; + self.get(url) + .await + .map(|generic: GenericResponse| generic.data) + } + + /// `POST /eth/v1/validator/{pubkey}/gas_limit` + pub async fn post_gas_limit( + &self, + pubkey: &PublicKeyBytes, + req: &UpdateGasLimitRequest, + ) -> Result { + let url = self.make_gas_limit_url(pubkey)?; + self.post_with_raw_response(url, req).await + } + + /// `DELETE /eth/v1/validator/{pubkey}/gas_limit` + pub async fn delete_gas_limit(&self, pubkey: &PublicKeyBytes) -> Result { + let url = self.make_gas_limit_url(pubkey)?; + self.delete_with_raw_response(url, &()).await + } } -/// Returns `Ok(response)` if the response is a `200 OK` response. 
Otherwise, creates an -/// appropriate error message. +/// Returns `Ok(response)` if the response is a `200 OK` response or a +/// `202 Accepted` response. Otherwise, creates an appropriate error message. async fn ok_or_error(response: Response) -> Result { let status = response.status(); - if status == StatusCode::OK { + if status == StatusCode::OK + || status == StatusCode::ACCEPTED + || status == StatusCode::NO_CONTENT + { Ok(response) } else if let Ok(message) = response.json().await { Err(Error::ServerMessage(message)) diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index d9fe969138..887bcb99ea 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -2,7 +2,20 @@ use account_utils::ZeroizeString; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; use slashing_protection::interchange::Interchange; -use types::PublicKeyBytes; +use types::{Address, PublicKeyBytes}; + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct GetFeeRecipientResponse { + pub pubkey: PublicKeyBytes, + pub ethaddress: Address, +} + +#[derive(Debug, Deserialize, Serialize, PartialEq)] +pub struct GetGasLimitResponse { + pub pubkey: PublicKeyBytes, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_limit: u64, +} #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct AuthResponse { diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index fe9b6a48c0..92439337f6 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -26,6 +26,12 @@ pub struct ValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, #[serde(with = "eth2_serde_utils::quoted_u64")] pub deposit_gwei: u64, } @@ -49,6 +55,12 @@ pub struct CreatedValidator { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, pub eth1_deposit_tx_data: String, #[serde(with = "eth2_serde_utils::quoted_u64")] pub deposit_gwei: u64, @@ -62,7 +74,15 @@ pub struct PostValidatorsResponseData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorPatchRequest { - pub enabled: bool, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub enabled: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, } #[derive(Clone, PartialEq, Serialize, Deserialize)] @@ -70,8 +90,18 @@ pub struct KeystoreValidatorsPostRequest { pub password: ZeroizeString, pub enable: bool, pub keystore: Keystore, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub graffiti: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -84,6 +114,12 @@ pub struct Web3SignerValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, pub voting_public_key: PublicKey, pub url: String, #[serde(default)] @@ -97,3 +133,14 @@ pub struct Web3SignerValidatorRequest { #[serde(skip_serializing_if = "Option::is_none")] pub client_identity_password: Option, } + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct UpdateFeeRecipientRequest { + pub ethaddress: Address, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct UpdateGasLimitRequest { + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_limit: u64, +} diff --git a/common/eth2/src/mixin.rs b/common/eth2/src/mixin.rs index 1de26961e6..a33cf8a40c 100644 --- a/common/eth2/src/mixin.rs +++ b/common/eth2/src/mixin.rs @@ -21,17 +21,17 @@ impl ResponseOptional for Result { /// Trait for extracting the fork name from the headers of a response. 
pub trait ResponseForkName { #[allow(clippy::result_unit_err)] - fn fork_name_from_header(&self) -> Result, ()>; + fn fork_name_from_header(&self) -> Result, String>; } impl ResponseForkName for Response { - fn fork_name_from_header(&self) -> Result, ()> { + fn fork_name_from_header(&self) -> Result, String> { self.headers() .get(CONSENSUS_VERSION_HEADER) .map(|fork_name| { fork_name .to_str() - .map_err(|_| ()) + .map_err(|e| e.to_string()) .and_then(ForkName::from_str) }) .transpose() diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index e4ba43d526..9a256f5ade 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -189,6 +189,14 @@ impl fmt::Display for StateId { #[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] pub struct DutiesResponse { pub dependent_root: Hash256, + pub execution_optimistic: Option, + pub data: T, +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] +pub struct ExecutionOptimisticResponse { + pub execution_optimistic: Option, pub data: T, } @@ -204,6 +212,18 @@ impl From for GenericResponse } } +impl GenericResponse { + pub fn add_execution_optimistic( + self, + execution_optimistic: bool, + ) -> ExecutionOptimisticResponse { + ExecutionOptimisticResponse { + execution_optimistic: Some(execution_optimistic), + data: self.data, + } + } +} + #[derive(Debug, PartialEq, Clone, Serialize)] #[serde(bound = "T: Serialize")] pub struct GenericResponseRef<'a, T: Serialize> { @@ -216,6 +236,14 @@ impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct ExecutionOptimisticForkVersionedResponse { + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, + pub execution_optimistic: Option, + pub data: T, +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct ForkVersionedResponse { #[serde(skip_serializing_if = 
"Option::is_none")] @@ -495,6 +523,8 @@ pub struct DepositContractData { pub struct ChainHeadData { pub slot: Slot, pub root: Hash256, + #[serde(skip_serializing_if = "Option::is_none")] + pub execution_optimistic: Option, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -522,6 +552,7 @@ pub struct VersionData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncingData { pub is_syncing: bool, + pub is_optimistic: Option, pub head_slot: Slot, pub sync_distance: Slot, } @@ -651,7 +682,7 @@ pub struct ValidatorAggregateAttestationQuery { pub slot: Slot, } -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct BeaconCommitteeSubscription { #[serde(with = "eth2_serde_utils::quoted_u64")] pub validator_index: u64, @@ -794,6 +825,7 @@ pub struct PeerCount { pub struct SseBlock { pub slot: Slot, pub block: Hash256, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -801,6 +833,7 @@ pub struct SseFinalizedCheckpoint { pub block: Hash256, pub state: Hash256, pub epoch: Epoch, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -811,6 +844,7 @@ pub struct SseHead { pub current_duty_dependent_root: Hash256, pub previous_duty_dependent_root: Hash256, pub epoch_transition: bool, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -823,6 +857,7 @@ pub struct SseChainReorg { pub new_head_block: Hash256, pub new_head_state: Hash256, pub epoch: Epoch, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] @@ -838,6 +873,7 @@ pub struct SseLateHead { pub attestable_delay: Option, pub imported_delay: Option, pub set_as_head_delay: Option, + pub execution_optimistic: bool, } #[derive(PartialEq, Debug, Serialize, Clone)] diff --git a/common/eth2_config/src/lib.rs 
b/common/eth2_config/src/lib.rs index ec8522ac98..7e3c025a83 100644 --- a/common/eth2_config/src/lib.rs +++ b/common/eth2_config/src/lib.rs @@ -69,7 +69,7 @@ impl Eth2Config { #[derive(Copy, Clone, Debug, PartialEq)] pub struct Eth2NetArchiveAndDirectory<'a> { pub name: &'a str, - pub unique_id: &'a str, + pub config_dir: &'a str, pub genesis_is_known: bool, } @@ -81,7 +81,7 @@ impl<'a> Eth2NetArchiveAndDirectory<'a> { .parse::() .expect("should parse manifest dir as path") .join(PREDEFINED_NETWORKS_DIR) - .join(self.unique_id) + .join(self.config_dir) } pub fn genesis_state_archive(&self) -> PathBuf { @@ -96,6 +96,7 @@ const GENESIS_STATE_IS_KNOWN: bool = true; #[derive(Copy, Clone, Debug, PartialEq)] pub struct HardcodedNet { pub name: &'static str, + pub config_dir: &'static str, pub genesis_is_known: bool, pub config: &'static [u8], pub deploy_block: &'static [u8], @@ -108,15 +109,15 @@ pub struct HardcodedNet { /// It also defines a `include__file!` macro which provides a wrapper around /// `std::include_bytes`, allowing the inclusion of bytes from the specific testnet directory. macro_rules! define_archive { - ($name_ident: ident, $name_str: tt, $genesis_is_known: ident) => { + ($name_ident: ident, $config_dir: tt, $genesis_is_known: ident) => { paste! { #[macro_use] pub mod $name_ident { use super::*; pub const ETH2_NET_DIR: Eth2NetArchiveAndDirectory = Eth2NetArchiveAndDirectory { - name: $name_str, - unique_id: $name_str, + name: stringify!($name_ident), + config_dir: $config_dir, genesis_is_known: $genesis_is_known, }; @@ -130,7 +131,7 @@ macro_rules! define_archive { "/", $this_crate::predefined_networks_dir!(), "/", - $name_str, + $config_dir, "/", $filename )) @@ -149,6 +150,7 @@ macro_rules! 
define_net { $this_crate::HardcodedNet { name: ETH2_NET_DIR.name, + config_dir: ETH2_NET_DIR.config_dir, genesis_is_known: ETH2_NET_DIR.genesis_is_known, config: $this_crate::$include_file!($this_crate, "../", "config.yaml"), deploy_block: $this_crate::$include_file!($this_crate, "../", "deploy_block.txt"), @@ -164,13 +166,13 @@ macro_rules! define_net { /// - `HARDCODED_NET_NAMES`: a list of the *names* of the networks defined by this macro. #[macro_export] macro_rules! define_nets { - ($this_crate: ident, $($name_ident: ident, $name_str: tt,)+) => { + ($this_crate: ident, $($name_ident: ident,)+) => { $this_crate::paste! { $( const [<$name_ident:upper>]: $this_crate::HardcodedNet = $this_crate::define_net!($this_crate, $name_ident, [<include_ $name_ident _file>]); )+ const HARDCODED_NETS: &[$this_crate::HardcodedNet] = &[$([<$name_ident:upper>],)+]; - pub const HARDCODED_NET_NAMES: &[&'static str] = &[$($name_str,)+]; + pub const HARDCODED_NET_NAMES: &[&'static str] = &[$(stringify!($name_ident),)+]; } }; } @@ -197,9 +199,9 @@ macro_rules! define_nets { /// `build.rs` which will unzip the genesis states. Then, that `eth2_network_configs` crate can /// perform the final step of using `std::include_bytes` to bake the files (bytes) into the binary. macro_rules! define_hardcoded_nets { - ($(($name_ident: ident, $name_str: tt, $genesis_is_known: ident)),+) => { + ($(($name_ident: ident, $config_dir: tt, $genesis_is_known: ident)),+) => { $( - define_archive!($name_ident, $name_str, $genesis_is_known); + define_archive!($name_ident, $config_dir, $genesis_is_known); )+ pub const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[$($name_ident::ETH2_NET_DIR,)+]; @@ -213,7 +215,7 @@ macro_rules! define_hardcoded_nets { #[macro_export] macro_rules! 
instantiate_hardcoded_nets { ($this_crate: ident) => { - $this_crate::define_nets!($this_crate, $($name_ident, $name_str,)+); + $this_crate::define_nets!($this_crate, $($name_ident,)+); } } }; @@ -234,9 +236,76 @@ macro_rules! define_hardcoded_nets { // // The directory containing the testnet files should match the human-friendly name (element 1). define_hardcoded_nets!( - (mainnet, "mainnet", GENESIS_STATE_IS_KNOWN), - (prater, "prater", GENESIS_STATE_IS_KNOWN), - (gnosis, "gnosis", GENESIS_STATE_IS_KNOWN), - (kiln, "kiln", GENESIS_STATE_IS_KNOWN), - (ropsten, "ropsten", GENESIS_STATE_IS_KNOWN) + ( + // Network name (must be unique among all networks). + mainnet, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "mainnet", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + prater, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "prater", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + goerli, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + // + // The Goerli network is effectively an alias to Prater. + "prater", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). 
+ gnosis, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "gnosis", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + kiln, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "kiln", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + ropsten, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "ropsten", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. + GENESIS_STATE_IS_KNOWN + ), + ( + // Network name (must be unique among all networks). + sepolia, + // The name of the directory in the `eth2_network_config/built_in_network_configs` + // directory where the configuration files are located for this network. + "sepolia", + // Set to `true` if the genesis state can be found in the `built_in_network_configs` + // directory. 
+ GENESIS_STATE_IS_KNOWN + ) ); diff --git a/common/eth2_network_config/built_in_network_configs/kiln/config.yaml b/common/eth2_network_config/built_in_network_configs/kiln/config.yaml index 797c0672c3..5631c8a0bf 100644 --- a/common/eth2_network_config/built_in_network_configs/kiln/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/kiln/config.yaml @@ -6,7 +6,7 @@ PRESET_BASE: 'mainnet' MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 95000 # Mar 11th, 2022, 14:00 UTC MIN_GENESIS_TIME: 1647007200 -# Gensis fork +# Genesis fork GENESIS_FORK_VERSION: 0x70000069 # 300 seconds (5 min) GENESIS_DELAY: 300 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index cc4e7dcab4..6e87a708f8 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -6,8 +6,8 @@ PRESET_BASE: 'mainnet' # Transition # --------------------------------------------------------------- -# TBD, 2**256-2**10 is a placeholder -TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +# Estimated on Sept 15, 2022 +TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000 # By default, don't use these params TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 @@ -35,7 +35,7 @@ ALTAIR_FORK_VERSION: 0x01000000 ALTAIR_FORK_EPOCH: 74240 # Merge BELLATRIX_FORK_VERSION: 0x02000000 -BELLATRIX_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Sharding SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml index fcb2d5342b..7000ff0bbc 
100644 --- a/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/boot_enr.yaml @@ -7,4 +7,11 @@ # Prysm bootnode #1 - enr:-Ku4QFmUkNp0g9bsLX2PfVeIyT-9WO-PZlrqZBNtEyofOOfLMScDjaTzGxIb1Ns9Wo5Pm_8nlq-SZwcQfTH2cgO-s88Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQLV_jMOIxKbjHFKgrkFvwDvpexo6Nd58TK5k7ss4Vt0IoN1ZHCCG1g # Lighthouse bootnode #1 -- enr:-LK4QLINdtobGquK7jukLDAKmsrH2ZuHM4k0TklY5jDTD4ZgfxR9weZmo5Jwu81hlKu3qPAvk24xHGBDjYs4o8f1gZ0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhDRN_P6Jc2VjcDI1NmsxoQJuNujTgsJUHUgVZML3pzrtgNtYg7rQ4K1tkWERgl0DdoN0Y3CCIyiDdWRwgiMo +- enr:-Ly4QFPk-cTMxZ3jWTafiNblEZkQIXGF2aVzCIGW0uHp6KaEAvBMoctE8S7YU0qZtuS7By0AA4YMfKoN9ls_GJRccVpFh2F0dG5ldHOI__________-EZXRoMpCC9KcrAgAQIIS2AQAAAAAAgmlkgnY0gmlwhKh3joWJc2VjcDI1NmsxoQKrxz8M1IHwJqRIpDqdVW_U1PeixMW5SfnBD-8idYIQrIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA +# Lighthouse bootnode #2 +- enr:-L64QJmwSDtaHVgGiqIxJWUtxWg6uLCipsms6j-8BdsOJfTWAs7CLF9HJnVqFE728O-JYUDCxzKvRdeMqBSauHVCMdaCAVWHYXR0bmV0c4j__________4RldGgykIL0pysCABAghLYBAAAAAACCaWSCdjSCaXCEQWxOdolzZWNwMjU2azGhA7Qmod9fK86WidPOzLsn5_8QyzL7ZcJ1Reca7RnD54vuiHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo +# Nimbus bootstrap nodes +- enr:-LK4QMzPq4Q7w5R-rnGQDcI8BYky6oPVBGQTbS1JJLVtNi_8PzBLV7Bdzsoame9nJK5bcJYpGHn4SkaDN2CM6tR5G_4Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhAN4yvyJc2VjcDI1NmsxoQKa8Qnp_P2clLIP6VqLKOp_INvEjLszalEnW0LoBZo4YYN0Y3CCI4yDdWRwgiOM +- enr:-LK4QLM_pPHa78R8xlcU_s40Y3XhFjlb3kPddW9lRlY67N5qeFE2Wo7RgzDgRs2KLCXODnacVHMFw1SfpsW3R474RZEBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhANBY-yJc2VjcDI1NmsxoQNsZkFXgKbTzuxF7uwxlGauTGJelE6HD269CcFlZ_R7A4N0Y3CCI4yDdWRwgiOM +# Teku bootnode +- 
enr:-KK4QH0RsNJmIG0EX9LSnVxMvg-CAOr3ZFF92hunU63uE7wcYBjG1cFbUTvEa5G_4nDJkRhUq9q2ck9xY-VX1RtBsruBtIRldGgykIL0pysBABAg__________-CaWSCdjSCaXCEEnXQ0YlzZWNwMjU2azGhA1grTzOdMgBvjNrk-vqWtTZsYQIi0QawrhoZrsn5Hd56g3RjcIIjKIN1ZHCCIyg diff --git a/common/eth2_network_config/built_in_network_configs/prater/config.yaml b/common/eth2_network_config/built_in_network_configs/prater/config.yaml index d337c4120a..d173be20de 100644 --- a/common/eth2_network_config/built_in_network_configs/prater/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/prater/config.yaml @@ -6,8 +6,7 @@ PRESET_BASE: 'mainnet' # Transition # --------------------------------------------------------------- -# TBD, 2**256-2**10 is a placeholder -TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912 +TERMINAL_TOTAL_DIFFICULTY: 10790000 # By default, don't use these params TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 @@ -35,7 +34,7 @@ ALTAIR_FORK_VERSION: 0x01001020 ALTAIR_FORK_EPOCH: 36660 # Merge BELLATRIX_FORK_VERSION: 0x02001020 -BELLATRIX_FORK_EPOCH: 18446744073709551615 +BELLATRIX_FORK_EPOCH: 112260 # Sharding SHARDING_FORK_VERSION: 0x03001020 SHARDING_FORK_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml index 45921aec53..5dad3ff759 100644 --- a/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/ropsten/config.yaml @@ -23,7 +23,7 @@ ALTAIR_FORK_EPOCH: 500 # Merge BELLATRIX_FORK_VERSION: 0x80000071 BELLATRIX_FORK_EPOCH: 750 -TERMINAL_TOTAL_DIFFICULTY: 43531756765713534 +TERMINAL_TOTAL_DIFFICULTY: 50000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml new file mode 100644 index 0000000000..abb3b1250e --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml @@ -0,0 +1 @@ +- enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml new file mode 100644 index 0000000000..4c3e4bb6ec --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -0,0 +1,76 @@ +# Extends the mainnet preset +PRESET_BASE: 'mainnet' +CONFIG_NAME: 'sepolia' + +# Genesis +# --------------------------------------------------------------- +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 1300 +# Sunday, June 19, 2022 2:00:00 PM +UTC +MIN_GENESIS_TIME: 1655647200 +GENESIS_FORK_VERSION: 0x90000069 +GENESIS_DELAY: 86400 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x90000070 +ALTAIR_FORK_EPOCH: 50 + +# Merge +BELLATRIX_FORK_VERSION: 0x90000071 +BELLATRIX_FORK_EPOCH: 100 +TERMINAL_TOTAL_DIFFICULTY: 17000000000000000 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x03001020 +CAPELLA_FORK_EPOCH: 18446744073709551615 + +# Sharding +SHARDING_FORK_VERSION: 0x04001020 +SHARDING_FORK_EPOCH: 18446744073709551615 + +# Time parameters +# 
--------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 14 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 + + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 11155111 +DEPOSIT_NETWORK_ID: 11155111 +DEPOSIT_CONTRACT_ADDRESS: 0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt b/common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt new file mode 100644 index 0000000000..5674fc3e57 --- /dev/null +++ b/common/eth2_network_config/built_in_network_configs/sepolia/deploy_block.txt @@ -0,0 +1 @@ +1273020 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip b/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip new file mode 100644 index 0000000000..1321634cea Binary files /dev/null and b/common/eth2_network_config/built_in_network_configs/sepolia/genesis.ssz.zip differ diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 8df54a5a8b..2bfd003266 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -256,6 +256,13 @@ mod tests { 
config.beacon_state::<E>().expect("beacon state can decode"); } + #[test] + fn prater_and_goerli_are_equal() { + let goerli = Eth2NetworkConfig::from_hardcoded_net(&GOERLI).unwrap(); + let prater = Eth2NetworkConfig::from_hardcoded_net(&PRATER).unwrap(); + assert_eq!(goerli, prater); + } + #[test] fn hard_coded_nets_work() { for net in HARDCODED_NETS { @@ -275,7 +282,7 @@ mod tests { "{:?}", net.name ); - assert_eq!(config.config.config_name, Some(net.name.to_string())); + assert_eq!(config.config.config_name, Some(net.config_dir.to_string())); } } diff --git a/common/fallback/src/lib.rs b/common/fallback/src/lib.rs index d91de09be0..70f327d204 100644 --- a/common/fallback/src/lib.rs +++ b/common/fallback/src/lib.rs @@ -45,7 +45,7 @@ impl<T> Fallback<T> { { match error { FallbackError::AllErrored(v) => format!( - "All fallback errored: {}", + "All fallbacks errored: {}", join( zip(self.servers.iter().map(f), v.iter()) .map(|(server, error)| format!("{} => {:?}", server, error)), diff --git a/common/hashset_delay/Cargo.toml b/common/hashset_delay/Cargo.toml deleted file mode 100644 index 1aa525a115..0000000000 --- a/common/hashset_delay/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "hashset_delay" -version = "0.2.0" -authors = ["Sigma Prime <contact@sigmaprime.io>"] -edition = "2021" - -[dependencies] -futures = "0.3.7" -tokio-util = { version = "0.6.2", features = ["time"] } - -[dev-dependencies] -tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } diff --git a/common/hashset_delay/src/hashset_delay.rs b/common/hashset_delay/src/hashset_delay.rs deleted file mode 100644 index 052d71fe3b..0000000000 --- a/common/hashset_delay/src/hashset_delay.rs +++ /dev/null @@ -1,197 +0,0 @@ -//NOTE: This is just a specific case of a HashMapDelay. -// The code has been copied to make unique `insert` and `insert_at` functions. - -/// The default delay for entries, in seconds. This is only used when `insert()` is used to add -/// entries. 
-const DEFAULT_DELAY: u64 = 30; - -use futures::prelude::*; -use std::{ - collections::HashMap, - pin::Pin, - task::{Context, Poll}, - time::{Duration, Instant}, -}; -use tokio_util::time::delay_queue::{self, DelayQueue}; - -pub struct HashSetDelay<K> -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - /// The given entries. - entries: HashMap<K, MapEntry>, - /// A queue holding the timeouts of each entry. - expirations: DelayQueue<K>, - /// The default expiration timeout of an entry. - default_entry_timeout: Duration, -} - -/// A wrapping around entries that adds the link to the entry's expiration, via a `delay_queue` key. -struct MapEntry { - /// The expiration key for the entry. - key: delay_queue::Key, - /// The actual entry. - value: Instant, -} - -impl<K> Default for HashSetDelay<K> -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - fn default() -> Self { - HashSetDelay::new(Duration::from_secs(DEFAULT_DELAY)) - } -} - -impl<K> HashSetDelay<K> -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - /// Creates a new instance of `HashSetDelay`. - pub fn new(default_entry_timeout: Duration) -> Self { - HashSetDelay { - entries: HashMap::new(), - expirations: DelayQueue::new(), - default_entry_timeout, - } - } - - /// Insert an entry into the mapping. Entries will expire after the `default_entry_timeout`. - pub fn insert(&mut self, key: K) { - self.insert_at(key, self.default_entry_timeout); - } - - /// Inserts an entry that will expire at a given instant. If the entry already exists, the - /// timeout is updated. 
- pub fn insert_at(&mut self, key: K, entry_duration: Duration) { - if self.contains(&key) { - // update the timeout - self.update_timeout(&key, entry_duration); - } else { - let delay_key = self.expirations.insert(key.clone(), entry_duration); - let entry = MapEntry { - key: delay_key, - value: Instant::now() + entry_duration, - }; - self.entries.insert(key, entry); - } - } - - /// Gets a reference to an entry if it exists. - /// - /// Returns None if the entry does not exist. - pub fn get(&self, key: &K) -> Option<&Instant> { - self.entries.get(key).map(|entry| &entry.value) - } - - /// Returns true if the key exists, false otherwise. - pub fn contains(&self, key: &K) -> bool { - self.entries.contains_key(key) - } - - /// Returns the length of the mapping. - pub fn len(&self) -> usize { - self.entries.len() - } - - /// Checks if the mapping is empty. - pub fn is_empty(&self) -> bool { - self.entries.is_empty() - } - - /// Updates the timeout for a given key. Returns true if the key existed, false otherwise. - /// - /// Panics if the duration is too far in the future. - pub fn update_timeout(&mut self, key: &K, timeout: Duration) -> bool { - if let Some(entry) = self.entries.get(key) { - self.expirations.reset(&entry.key, timeout); - true - } else { - false - } - } - - /// Removes a key from the map returning the value associated with the key that was in the map. - /// - /// Return false if the key was not in the map. - pub fn remove(&mut self, key: &K) -> bool { - if let Some(entry) = self.entries.remove(key) { - self.expirations.remove(&entry.key); - return true; - } - false - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns false. 
- pub fn retain<F: FnMut(&K) -> bool>(&mut self, mut f: F) { - let expiration = &mut self.expirations; - self.entries.retain(|key, entry| { - let result = f(key); - if !result { - expiration.remove(&entry.key); - } - result - }) - } - - /// Removes all entries from the map. - pub fn clear(&mut self) { - self.entries.clear(); - self.expirations.clear(); - } - - /// Returns a vector of referencing all keys in the map. - pub fn keys(&self) -> impl Iterator<Item = &K> { - self.entries.keys() - } -} - -impl<K> Stream for HashSetDelay<K> -where - K: std::cmp::Eq + std::hash::Hash + std::clone::Clone + Unpin, -{ - type Item = Result<K, String>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> { - match self.expirations.poll_expired(cx) { - Poll::Ready(Some(Ok(key))) => match self.entries.remove(key.get_ref()) { - Some(_) => Poll::Ready(Some(Ok(key.into_inner()))), - None => Poll::Ready(Some(Err("Value no longer exists in expirations".into()))), - }, - Poll::Ready(Some(Err(e))) => { - Poll::Ready(Some(Err(format!("delay queue error: {:?}", e)))) - } - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - } - } -} - -#[cfg(test)] - -mod tests { - use super::*; - - #[tokio::test] - async fn should_not_panic() { - let key = 2u8; - - let mut map = HashSetDelay::default(); - - map.insert(key); - map.update_timeout(&key, Duration::from_secs(100)); - - let fut = |cx: &mut Context| { - let _ = map.poll_next_unpin(cx); - let _ = map.poll_next_unpin(cx); - Poll::Ready(()) - }; - - future::poll_fn(fut).await; - - map.insert(key); - map.update_timeout(&key, Duration::from_secs(100)); - } -} diff --git a/common/hashset_delay/src/lib.rs b/common/hashset_delay/src/lib.rs deleted file mode 100644 index 175ad72cfa..0000000000 --- a/common/hashset_delay/src/lib.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! This crate provides a single type (its counter-part HashMapDelay has been removed as it -//! 
currently is not in use in lighthouse): -//! - `HashSetDelay` -//! -//! # HashSetDelay -//! -//! This is similar to a `HashMapDelay` except the mapping maps to the expiry time. This -//! allows users to add objects and check their expiry deadlines before the `Stream` -//! consumes them. - -mod hashset_delay; -pub use crate::hashset_delay::HashSetDelay; diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs index 98973de1ad..5d25bb313f 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/lighthouse_metrics/src/lib.rs @@ -54,14 +54,15 @@ //! } //! ``` -use prometheus::{HistogramOpts, Opts}; +use prometheus::{Error, HistogramOpts, Opts}; use std::time::Duration; use prometheus::core::{Atomic, GenericGauge, GenericGaugeVec}; pub use prometheus::{ + exponential_buckets, linear_buckets, proto::{Metric, MetricFamily, MetricType}, Encoder, Gauge, GaugeVec, Histogram, HistogramTimer, HistogramVec, IntCounter, IntCounterVec, - IntGauge, IntGaugeVec, Result, TextEncoder, + IntGauge, IntGaugeVec, Result, TextEncoder, DEFAULT_BUCKETS, }; /// Collect all the metrics for reporting. @@ -99,7 +100,17 @@ pub fn try_create_float_gauge(name: &str, help: &str) -> Result<Gauge> { /// Attempts to create a `Histogram`, returning `Err` if the registry does not accept the counter /// (potentially due to naming conflict). pub fn try_create_histogram(name: &str, help: &str) -> Result<Histogram> { - let opts = HistogramOpts::new(name, help); + try_create_histogram_with_buckets(name, help, Ok(DEFAULT_BUCKETS.to_vec())) +} + +/// Attempts to create a `Histogram` with specified buckets, returning `Err` if the registry does not accept the counter +/// (potentially due to naming conflict) or no valid buckets are provided. 
+pub fn try_create_histogram_with_buckets( + name: &str, + help: &str, + buckets: Result<Vec<f64>>, +) -> Result<Histogram> { + let opts = HistogramOpts::new(name, help).buckets(buckets?); let histogram = Histogram::with_opts(opts)?; prometheus::register(Box::new(histogram.clone()))?; Ok(histogram) @@ -112,7 +123,18 @@ pub fn try_create_histogram_vec( help: &str, label_names: &[&str], ) -> Result<HistogramVec> { - let opts = HistogramOpts::new(name, help); + try_create_histogram_vec_with_buckets(name, help, Ok(DEFAULT_BUCKETS.to_vec()), label_names) +} + +/// Attempts to create a `HistogramVec` with specified buckets, returning `Err` if the registry does not accept the counter +/// (potentially due to naming conflict) or no valid buckets are provided. +pub fn try_create_histogram_vec_with_buckets( + name: &str, + help: &str, + buckets: Result<Vec<f64>>, + label_names: &[&str], +) -> Result<HistogramVec> { + let opts = HistogramOpts::new(name, help).buckets(buckets?); let histogram_vec = HistogramVec::new(opts, label_names)?; prometheus::register(Box::new(histogram_vec.clone()))?; Ok(histogram_vec) @@ -357,3 +379,28 @@ fn duration_to_f64(duration: Duration) -> f64 { let nanos = f64::from(duration.subsec_nanos()) / 1e9; duration.as_secs() as f64 + nanos } + +/// Create buckets using divisors of 10 multiplied by powers of 10, e.g., +/// […, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, …] +/// +/// The buckets go from `10^min_power` to `5 × 10^max_power`, inclusively. +/// The total number of buckets is `3 * (max_power - min_power + 1)`. 
+/// +/// assert_eq!(vec![0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0], decimal_buckets(-1, 1)); +/// assert_eq!(vec![1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0], decimal_buckets(0, 2)); +pub fn decimal_buckets(min_power: i32, max_power: i32) -> Result<Vec<f64>> { + if max_power < min_power { + return Err(Error::Msg(format!( + "decimal_buckets min_power needs to be <= max_power, given {} and {}", + min_power, max_power + ))); + } + + let mut buckets = Vec::with_capacity(3 * (max_power - min_power + 1) as usize); + for n in min_power..=max_power { + for m in &[1f64, 2f64, 5f64] { + buckets.push(m * 10f64.powi(n)) + } + } + Ok(buckets) +} diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index b50079f195..85baa47fbb 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v2.2.1-", - fallback = "Lighthouse/v2.2.1" + prefix = "Lighthouse/v3.1.0-", + fallback = "Lighthouse/v3.1.0" ); /// Returns `VERSION`, but with platform information appended to the end. @@ -37,8 +37,9 @@ mod test { #[test] fn version_formatting() { - let re = Regex::new(r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-rc.[0-9])?-[[:xdigit:]]{7}\+?$") - .unwrap(); + let re = + Regex::new(r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-rc.[0-9])?(-[[:xdigit:]]{7})?\+?$") + .unwrap(); assert!( re.is_match(VERSION), "version doesn't match regex: {}", diff --git a/common/monitoring_api/src/lib.rs b/common/monitoring_api/src/lib.rs index 03cdf87c25..9592c50a40 100644 --- a/common/monitoring_api/src/lib.rs +++ b/common/monitoring_api/src/lib.rs @@ -16,7 +16,7 @@ use types::*; pub use types::ProcessType; /// Duration after which we collect and send metrics to remote endpoint. 
-pub const UPDATE_DURATION: u64 = 60; +pub const DEFAULT_UPDATE_DURATION: u64 = 60; /// Timeout for HTTP requests. pub const TIMEOUT_DURATION: u64 = 5; @@ -55,6 +55,8 @@ pub struct Config { /// Path for the cold database required for fetching beacon db size metrics. /// Note: not relevant for validator and system metrics. pub freezer_db_path: Option<PathBuf>, + /// User-defined update period in seconds. + pub update_period_secs: Option<u64>, } #[derive(Clone)] @@ -64,6 +66,7 @@ pub struct MonitoringHttpClient { db_path: Option<PathBuf>, /// Path to the freezer database. freezer_db_path: Option<PathBuf>, + update_period: Duration, monitoring_endpoint: SensitiveUrl, log: slog::Logger, } @@ -74,6 +77,9 @@ impl MonitoringHttpClient { client: reqwest::Client::new(), db_path: config.db_path.clone(), freezer_db_path: config.freezer_db_path.clone(), + update_period: Duration::from_secs( + config.update_period_secs.unwrap_or(DEFAULT_UPDATE_DURATION), + ), monitoring_endpoint: SensitiveUrl::parse(&config.monitoring_endpoint) .map_err(|e| format!("Invalid monitoring endpoint: {:?}", e))?, log, @@ -100,10 +106,15 @@ impl MonitoringHttpClient { let mut interval = interval_at( // Have some initial delay for the metrics to get initialized Instant::now() + Duration::from_secs(25), - Duration::from_secs(UPDATE_DURATION), + self.update_period, ); - info!(self.log, "Starting monitoring api"; "endpoint" => %self.monitoring_endpoint); + info!( + self.log, + "Starting monitoring API"; + "endpoint" => %self.monitoring_endpoint, + "update_period" => format!("{}s", self.update_period.as_secs()), + ); let update_future = async move { loop { diff --git a/common/sensitive_url/src/lib.rs b/common/sensitive_url/src/lib.rs index 7a3cbae20c..b6705eb602 100644 --- a/common/sensitive_url/src/lib.rs +++ b/common/sensitive_url/src/lib.rs @@ -1,5 +1,6 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; +use std::str::FromStr; use url::Url; #[derive(Debug)] @@ -9,6 
+10,12 @@ pub enum SensitiveError { RedactError(String), } +impl fmt::Display for SensitiveError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + // Wrapper around Url which provides a custom `Display` implementation to protect user secrets. #[derive(Clone, PartialEq)] pub struct SensitiveUrl { @@ -39,7 +46,7 @@ impl Serialize for SensitiveUrl { where S: Serializer, { - serializer.serialize_str(&self.full.to_string()) + serializer.serialize_str(self.full.as_ref()) } } @@ -54,6 +61,14 @@ impl<'de> Deserialize<'de> for SensitiveUrl { } } +impl FromStr for SensitiveUrl { + type Err = SensitiveError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + Self::parse(s) + } +} + impl SensitiveUrl { pub fn parse(url: &str) -> Result<Self, SensitiveError> { let surl = Url::parse(url).map_err(SensitiveError::ParseError)?; diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index f344dc4735..08bb565870 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" [dependencies] -tokio = { version = "1.14.0", features = ["rt-multi-thread"] } +tokio = { version = "1.14.0", features = ["rt-multi-thread", "macros"] } slog = "2.5.2" futures = "0.3.7" exit-future = "0.2.0" diff --git a/common/task_executor/src/lib.rs b/common/task_executor/src/lib.rs index dd525bea50..6bf4cc8e08 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -7,6 +7,8 @@ use slog::{crit, debug, o, trace}; use std::sync::Weak; use tokio::runtime::{Handle, Runtime}; +pub use tokio::task::JoinHandle; + /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { @@ -312,6 +314,61 @@ impl TaskExecutor { Some(future) } + /// Block the current (non-async) thread on the completion of some future. 
+ /// + /// ## Warning + /// + /// This method is "dangerous" since calling it from an async thread will result in a panic! Any + /// use of this outside of testing should be very deeply considered as Lighthouse has been + /// burned by this function in the past. + /// + /// Determining what is an "async thread" is rather challenging; just because a function isn't + /// marked as `async` doesn't mean it's not being called from an `async` function or there isn't + /// a `tokio` context present in the thread-local storage due to some `rayon` funkiness. Talk to + /// @paulhauner if you plan to use this function in production. He has put metrics in here to + /// track any use of it, so don't think you can pull a sneaky one on him. + pub fn block_on_dangerous<F: Future>( + &self, + future: F, + name: &'static str, + ) -> Option<F::Output> { + let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]); + metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + let log = self.log.clone(); + let handle = self.handle()?; + let exit = self.exit.clone(); + + debug!( + log, + "Starting block_on task"; + "name" => name + ); + + handle.block_on(async { + let output = tokio::select! { + output = future => { + debug!( + log, + "Completed block_on task"; + "name" => name + ); + Some(output) + }, + _ = exit => { + debug!( + log, + "Cancelled block_on task"; + "name" => name, + ); + None + } + }; + metrics::dec_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + drop(timer); + output + }) + } + /// Returns a `Handle` to the current runtime. pub fn handle(&self) -> Option<Handle> { self.handle_provider.handle() diff --git a/common/task_executor/src/metrics.rs b/common/task_executor/src/metrics.rs index ead5925b6e..6ecea86d65 100644 --- a/common/task_executor/src/metrics.rs +++ b/common/task_executor/src/metrics.rs @@ -18,6 +18,16 @@ lazy_static! 
{ "Time taken by blocking tasks", &["blocking_task_hist"] ); + pub static ref BLOCK_ON_TASKS_COUNT: Result<IntGaugeVec> = try_create_int_gauge_vec( + "block_on_tasks_count", + "Total number of block_on_dangerous tasks spawned", + &["name"] + ); + pub static ref BLOCK_ON_TASKS_HISTOGRAM: Result<HistogramVec> = try_create_histogram_vec( + "block_on_tasks_histogram", + "Time taken by block_on_dangerous tasks", + &["name"] + ); pub static ref TASKS_HISTOGRAM: Result<HistogramVec> = try_create_histogram_vec( "async_tasks_time_histogram", "Time taken by async tasks", diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs index f5ce1156e5..cf3d11af8d 100644 --- a/common/warp_utils/src/reject.rs +++ b/common/warp_utils/src/reject.rs @@ -205,8 +205,13 @@ pub async fn handle_rejection(err: warp::Rejection) -> Result<impl warp::Reply, code = StatusCode::FORBIDDEN; message = format!("FORBIDDEN: Invalid auth token: {}", e.0); } else if let Some(e) = err.find::<warp::reject::MissingHeader>() { - code = StatusCode::BAD_REQUEST; - message = format!("BAD_REQUEST: missing {} header", e.name()); + if e.name().eq("Authorization") { + code = StatusCode::UNAUTHORIZED; + message = "UNAUTHORIZED: missing Authorization header".to_string(); + } else { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: missing {} header", e.name()); + } } else if let Some(e) = err.find::<warp::reject::InvalidHeader>() { code = StatusCode::BAD_REQUEST; message = format!("BAD_REQUEST: invalid {} header", e.name()); diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 77603d09e6..52a738351e 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -8,10 +8,13 @@ edition = "2021" [dependencies] types = { path = "../types" } +state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } eth2_ssz = "0.4.1" eth2_ssz_derive = "0.3.0" +slog = { version = "2.5.2", features = 
["max_level_trace", "release_max_level_trace"] } [dev-dependencies] beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 49510e7326..bd329fff01 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,18 +1,27 @@ use crate::{ForkChoiceStore, InvalidationOperation}; -use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice}; +use proto_array::{ + Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, ProtoArrayForkChoice, +}; +use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; +use state_processing::{ + per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, + per_epoch_processing::altair::participation_cache, +}; use std::cmp::Ordering; +use std::collections::BTreeSet; use std::marker::PhantomData; use std::time::Duration; use types::{ - consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, BeaconBlock, BeaconState, - BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, + consts::merge::INTERVALS_PER_SLOT, AttestationShufflingId, AttesterSlashing, BeaconBlockRef, + BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, + ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch, SignedBeaconBlock, Slot, }; #[derive(Debug)] pub enum Error<T> { InvalidAttestation(InvalidAttestation), + InvalidAttesterSlashing(AttesterSlashingValidationError), InvalidBlock(InvalidBlock), ProtoArrayError(String), InvalidProtoArrayBytes(String), @@ -51,6 +60,9 @@ pub enum Error<T> { MissingFinalizedBlock { finalized_checkpoint: Checkpoint, }, + 
UnrealizedVoteProcessing(state_processing::EpochProcessingError), + ParticipationCacheBuild(participation_cache::Error), + ValidatorStatuses(BeaconStateError), } impl<T> From<InvalidAttestation> for Error<T> { @@ -59,6 +71,38 @@ impl<T> From<InvalidAttestation> for Error<T> { } } +impl<T> From<AttesterSlashingValidationError> for Error<T> { + fn from(e: AttesterSlashingValidationError) -> Self { + Error::InvalidAttesterSlashing(e) + } +} + +impl<T> From<state_processing::EpochProcessingError> for Error<T> { + fn from(e: state_processing::EpochProcessingError) -> Self { + Error::UnrealizedVoteProcessing(e) + } +} + +#[derive(Debug, Clone, Copy)] +/// Controls how fork choice should behave when restoring from a persisted fork choice. +pub enum ResetPayloadStatuses { + /// Reset all payload statuses back to "optimistic". + Always, + /// Only reset all payload statuses back to "optimistic" when an "invalid" block is present. + OnlyWithInvalidPayload, +} + +impl ResetPayloadStatuses { + /// When `should_always_reset == True`, return `ResetPayloadStatuses::Always`. + pub fn always_reset_conditionally(should_always_reset: bool) -> Self { + if should_always_reset { + ResetPayloadStatuses::Always + } else { + ResetPayloadStatuses::OnlyWithInvalidPayload + } + } +} + #[derive(Debug)] pub enum InvalidBlock { UnknownParent(Hash256), @@ -114,6 +158,66 @@ impl<T> From<String> for Error<T> { } } +/// Indicates whether the unrealized justification of a block should be calculated and tracked. +/// If a block has been finalized, this can be set to false. This is useful when syncing finalized +/// portions of the chain. Otherwise this should always be set to true. 
+#[derive(Clone, Copy, Debug, PartialEq)] +pub enum CountUnrealized { + True, + False, +} + +impl CountUnrealized { + pub fn is_true(&self) -> bool { + matches!(self, CountUnrealized::True) + } + + pub fn and(&self, other: CountUnrealized) -> CountUnrealized { + if self.is_true() && other.is_true() { + CountUnrealized::True + } else { + CountUnrealized::False + } + } +} + +impl From<bool> for CountUnrealized { + fn from(count_unrealized: bool) -> Self { + if count_unrealized { + CountUnrealized::True + } else { + CountUnrealized::False + } + } +} + +#[derive(Copy, Clone)] +enum UpdateJustifiedCheckpointSlots { + OnTick { + current_slot: Slot, + }, + OnBlock { + state_slot: Slot, + current_slot: Slot, + }, +} + +impl UpdateJustifiedCheckpointSlots { + fn current_slot(&self) -> Slot { + match self { + UpdateJustifiedCheckpointSlots::OnTick { current_slot } => *current_slot, + UpdateJustifiedCheckpointSlots::OnBlock { current_slot, .. } => *current_slot, + } + } + + fn state_slot(&self) -> Option<Slot> { + match self { + UpdateJustifiedCheckpointSlots::OnTick { .. } => None, + UpdateJustifiedCheckpointSlots::OnBlock { state_slot, .. } => Some(*state_slot), + } + } +} + /// Indicates if a block has been verified by an execution payload. /// /// There is no variant for "invalid", since such a block should never be added to fork choice. @@ -162,51 +266,6 @@ fn compute_start_slot_at_epoch<E: EthSpec>(epoch: Epoch) -> Slot { epoch.start_slot(E::slots_per_epoch()) } -/// Called whenever the current time increases. -/// -/// ## Specification -/// -/// Equivalent to: -/// -/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick -fn on_tick<T, E>(store: &mut T, time: Slot) -> Result<(), Error<T::Error>> -where - T: ForkChoiceStore<E>, - E: EthSpec, -{ - let previous_slot = store.get_current_slot(); - - if time > previous_slot + 1 { - return Err(Error::InconsistentOnTick { - previous_slot, - time, - }); - } - - // Update store time. 
- store.set_current_slot(time); - - let current_slot = store.get_current_slot(); - - // Reset proposer boost if this is a new slot. - if current_slot > previous_slot { - store.set_proposer_boost_root(Hash256::zero()); - } - - // Not a new epoch, return. - if !(current_slot > previous_slot && compute_slots_since_epoch_start::<E>(current_slot) == 0) { - return Ok(()); - } - - if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { - store - .set_justified_checkpoint(*store.best_justified_checkpoint()) - .map_err(Error::ForkChoiceStoreError)?; - } - - Ok(()) -} - /// Used for queuing attestations from the current slot. Only contains the minimum necessary /// information about the attestation. #[derive(Clone, PartialEq, Encode, Decode)] @@ -248,6 +307,7 @@ fn dequeue_attestations( /// Equivalent to the `is_from_block` `bool` in: /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation +#[derive(Clone, Copy)] pub enum AttestationFromBlock { True, False, @@ -258,9 +318,17 @@ pub enum AttestationFromBlock { pub struct ForkchoiceUpdateParameters { pub head_root: Hash256, pub head_hash: Option<ExecutionBlockHash>, + pub justified_hash: Option<ExecutionBlockHash>, pub finalized_hash: Option<ExecutionBlockHash>, } +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct ForkChoiceView { + pub head_block_root: Hash256, + pub justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, +} + /// Provides an implementation of "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#ethereum-20-phase-0----beacon-chain-fork-choice @@ -279,7 +347,9 @@ pub struct ForkChoice<T, E> { /// Attestations that arrived at the current slot and must be queued for later processing. queued_attestations: Vec<QueuedAttestation>, /// Stores a cache of the values required to be sent to the execution layer. 
- forkchoice_update_parameters: Option<ForkchoiceUpdateParameters>, + forkchoice_update_parameters: ForkchoiceUpdateParameters, + /// The most recent result of running `Self::get_head`. + head_block_root: Hash256, _phantom: PhantomData<E>, } @@ -306,6 +376,9 @@ where anchor_block_root: Hash256, anchor_block: &SignedBeaconBlock<E>, anchor_state: &BeaconState<E>, + current_slot: Option<Slot>, + count_unrealized_full_config: CountUnrealizedFull, + spec: &ChainSpec, ) -> Result<Self, Error<T::Error>> { // Sanity check: the anchor must lie on an epoch boundary. if anchor_block.slot() % E::slots_per_epoch() != 0 { @@ -340,7 +413,10 @@ where }, ); - let proto_array = ProtoArrayForkChoice::new( + // If the current slot is not provided, use the value that was last provided to the store. + let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); + + let proto_array = ProtoArrayForkChoice::new::<E>( finalized_block_slot, finalized_block_state_root, *fc_store.justified_checkpoint(), @@ -348,41 +424,36 @@ where current_epoch_shuffling_id, next_epoch_shuffling_id, execution_status, + count_unrealized_full_config, )?; - Ok(Self { + let mut fork_choice = Self { fc_store, proto_array, queued_attestations: vec![], - forkchoice_update_parameters: None, + // This will be updated during the next call to `Self::get_head`. + forkchoice_update_parameters: ForkchoiceUpdateParameters { + head_hash: None, + justified_hash: None, + finalized_hash: None, + head_root: Hash256::zero(), + }, + // This will be updated during the next call to `Self::get_head`. + head_block_root: Hash256::zero(), _phantom: PhantomData, - }) - } + }; - /// Instantiates `Self` from some existing components. - /// - /// This is useful if the existing components have been loaded from disk after a process - /// restart. 
- pub fn from_components( - fc_store: T, - proto_array: ProtoArrayForkChoice, - queued_attestations: Vec<QueuedAttestation>, - ) -> Self { - Self { - fc_store, - proto_array, - queued_attestations, - forkchoice_update_parameters: None, - _phantom: PhantomData, - } + // Ensure that `fork_choice.head_block_root` is updated. + fork_choice.get_head(current_slot, spec)?; + + Ok(fork_choice) } /// Returns cached information that can be used to issue a `forkchoiceUpdated` message to an /// execution engine. /// - /// These values are updated each time `Self::get_head` is called. May return `None` if - /// `Self::get_head` has not yet been called. - pub fn get_forkchoice_update_parameters(&self) -> Option<ForkchoiceUpdateParameters> { + /// These values are updated each time `Self::get_head` is called. + pub fn get_forkchoice_update_parameters(&self) -> ForkchoiceUpdateParameters { self.forkchoice_update_parameters } @@ -440,10 +511,13 @@ where /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_head pub fn get_head( &mut self, - current_slot: Slot, + system_time_current_slot: Slot, spec: &ChainSpec, ) -> Result<Hash256, Error<T::Error>> { - self.update_time(current_slot)?; + // Provide the slot (as per the system clock) to the `fc_store` and then return its view of + // the current slot. The `fc_store` will ensure that the `current_slot` is never + // decreasing, a property which we must maintain. + let current_slot = self.update_time(system_time_current_slot, spec)?; let store = &mut self.fc_store; @@ -452,26 +526,55 @@ where *store.finalized_checkpoint(), store.justified_balances(), store.proposer_boost_root(), + store.equivocating_indices(), + current_slot, spec, )?; + self.head_block_root = head_root; + // Cache some values for the next forkchoiceUpdate call to the execution layer. 
let head_hash = self .get_block(&head_root) .and_then(|b| b.execution_status.block_hash()); + let justified_root = self.justified_checkpoint().root; let finalized_root = self.finalized_checkpoint().root; + let justified_hash = self + .get_block(&justified_root) + .and_then(|b| b.execution_status.block_hash()); let finalized_hash = self .get_block(&finalized_root) .and_then(|b| b.execution_status.block_hash()); - self.forkchoice_update_parameters = Some(ForkchoiceUpdateParameters { + self.forkchoice_update_parameters = ForkchoiceUpdateParameters { head_root, head_hash, + justified_hash, finalized_hash, - }); + }; Ok(head_root) } + /// Return information about: + /// + /// - The LMD head of the chain. + /// - The FFG checkpoints. + /// + /// The information is "cached" since the last call to `Self::get_head`. + /// + /// ## Notes + /// + /// The finalized/justified checkpoints are determined from the fork choice store. Therefore, + /// it's possible that the state corresponding to `get_state(get_block(head_block_root))` will + /// have *differing* finalized and justified information. + pub fn cached_fork_choice_view(&self) -> ForkChoiceView { + ForkChoiceView { + head_block_root: self.head_block_root, + justified_checkpoint: self.justified_checkpoint(), + finalized_checkpoint: self.finalized_checkpoint(), + } + } + /// Returns `true` if the given `store` should be updated to set /// `state.current_justified_checkpoint` its `justified_checkpoint`. 
/// @@ -482,13 +585,11 @@ where /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#should_update_justified_checkpoint fn should_update_justified_checkpoint( &mut self, - current_slot: Slot, - state: &BeaconState<E>, + new_justified_checkpoint: Checkpoint, + slots: UpdateJustifiedCheckpointSlots, spec: &ChainSpec, ) -> Result<bool, Error<T::Error>> { - self.update_time(current_slot)?; - - let new_justified_checkpoint = &state.current_justified_checkpoint(); + self.update_time(slots.current_slot(), spec)?; if compute_slots_since_epoch_start::<E>(self.fc_store.get_current_slot()) < spec.safe_slots_to_update_justified @@ -500,11 +601,13 @@ where compute_start_slot_at_epoch::<E>(self.fc_store.justified_checkpoint().epoch); // This sanity check is not in the spec, but the invariant is implied. - if justified_slot >= state.slot() { - return Err(Error::AttemptToRevertJustification { - store: justified_slot, - state: state.slot(), - }); + if let Some(state_slot) = slots.state_slot() { + if justified_slot >= state_slot { + return Err(Error::AttemptToRevertJustification { + store: justified_slot, + state: state_slot, + }); + } } // We know that the slot for `new_justified_checkpoint.root` is not greater than @@ -565,22 +668,25 @@ where #[allow(clippy::too_many_arguments)] pub fn on_block<Payload: ExecPayload<E>>( &mut self, - current_slot: Slot, - block: &BeaconBlock<E, Payload>, + system_time_current_slot: Slot, + block: BeaconBlockRef<E, Payload>, block_root: Hash256, block_delay: Duration, state: &BeaconState<E>, payload_verification_status: PayloadVerificationStatus, spec: &ChainSpec, + count_unrealized: CountUnrealized, ) -> Result<(), Error<T::Error>> { - let current_slot = self.update_time(current_slot)?; + // Provide the slot (as per the system clock) to the `fc_store` and then return its view of + // the current slot. The `fc_store` will ensure that the `current_slot` is never + // decreasing, a property which we must maintain. 
+ let current_slot = self.update_time(system_time_current_slot, spec)?; // Parent block must be known. - if !self.proto_array.contains_block(&block.parent_root()) { - return Err(Error::InvalidBlock(InvalidBlock::UnknownParent( - block.parent_root(), - ))); - } + let parent_block = self + .proto_array + .get_block(&block.parent_root()) + .ok_or_else(|| Error::InvalidBlock(InvalidBlock::UnknownParent(block.parent_root())))?; // Blocks cannot be in the future. If they are, their consideration must be delayed until // the are in the past. @@ -629,29 +735,111 @@ where self.fc_store.set_proposer_boost_root(block_root); } - // Update justified checkpoint. - if state.current_justified_checkpoint().epoch > self.fc_store.justified_checkpoint().epoch { - if state.current_justified_checkpoint().epoch - > self.fc_store.best_justified_checkpoint().epoch + let update_justified_checkpoint_slots = UpdateJustifiedCheckpointSlots::OnBlock { + state_slot: state.slot(), + current_slot, + }; + + // Update store with checkpoints if necessary + self.update_checkpoints( + state.current_justified_checkpoint(), + state.finalized_checkpoint(), + update_justified_checkpoint_slots, + spec, + )?; + + // Update unrealized justified/finalized checkpoints. + let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if count_unrealized + .is_true() + { + let block_epoch = block.slot().epoch(E::slots_per_epoch()); + + // If the parent checkpoints are already at the same epoch as the block being imported, + // it's impossible for the unrealized checkpoints to differ from the parent's. This + // holds true because: + // + // 1. A child block cannot have lower FFG checkpoints than its parent. + // 2. A block in epoch `N` cannot contain attestations which would justify an epoch higher than `N`. + // 3. A block in epoch `N` cannot contain attestations which would finalize an epoch higher than `N - 1`. + // + // This is an optimization. 
It should reduce the amount of times we run + // `process_justification_and_finalization` by approximately 1/3rd when the chain is + // performing optimally. + let parent_checkpoints = parent_block + .unrealized_justified_checkpoint + .zip(parent_block.unrealized_finalized_checkpoint) + .filter(|(parent_justified, parent_finalized)| { + parent_justified.epoch == block_epoch + && parent_finalized.epoch + 1 >= block_epoch + }); + + let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = + if let Some((parent_justified, parent_finalized)) = parent_checkpoints { + (parent_justified, parent_finalized) + } else { + // FIXME(sproul): this might be slooow due to the full cache build + let justification_and_finalization_state = match block { + BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => { + let participation_cache = + per_epoch_processing::altair::ParticipationCache::new(state, spec) + .map_err(Error::ParticipationCacheBuild)?; + per_epoch_processing::altair::process_justification_and_finalization( + state, + &participation_cache, + )? + } + BeaconBlockRef::Base(_) => { + let mut validator_statuses = + per_epoch_processing::base::ValidatorStatuses::new(state, spec) + .map_err(Error::ValidatorStatuses)?; + validator_statuses + .process_attestations(state) + .map_err(Error::ValidatorStatuses)?; + per_epoch_processing::base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )? 
+ } + }; + + ( + justification_and_finalization_state.current_justified_checkpoint(), + justification_and_finalization_state.finalized_checkpoint(), + ) + }; + + // Update best known unrealized justified & finalized checkpoints + if unrealized_justified_checkpoint.epoch + > self.fc_store.unrealized_justified_checkpoint().epoch { self.fc_store - .set_best_justified_checkpoint(state.current_justified_checkpoint()); + .set_unrealized_justified_checkpoint(unrealized_justified_checkpoint); } - if self.should_update_justified_checkpoint(current_slot, state, spec)? { + if unrealized_finalized_checkpoint.epoch + > self.fc_store.unrealized_finalized_checkpoint().epoch + { self.fc_store - .set_justified_checkpoint(state.current_justified_checkpoint()) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; + .set_unrealized_finalized_checkpoint(unrealized_finalized_checkpoint); } - } - // Update finalized checkpoint. - if state.finalized_checkpoint().epoch > self.fc_store.finalized_checkpoint().epoch { - self.fc_store - .set_finalized_checkpoint(state.finalized_checkpoint()); - self.fc_store - .set_justified_checkpoint(state.current_justified_checkpoint()) - .map_err(Error::UnableToSetJustifiedCheckpoint)?; - } + // If block is from past epochs, try to update store's justified & finalized checkpoints right away + if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) { + self.update_checkpoints( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + update_justified_checkpoint_slots, + spec, + )?; + } + + ( + Some(unrealized_justified_checkpoint), + Some(unrealized_finalized_checkpoint), + ) + } else { + (None, None) + }; let target_slot = block .slot() @@ -700,32 +888,68 @@ where // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it doesn't have any votes. 
- self.proto_array.process_block(ProtoBlock { - slot: block.slot(), - root: block_root, - parent_root: Some(block.parent_root()), - target_root, - current_epoch_shuffling_id: AttestationShufflingId::new( - block_root, - state, - RelativeEpoch::Current, - ) - .map_err(Error::BeaconStateError)?, - next_epoch_shuffling_id: AttestationShufflingId::new( - block_root, - state, - RelativeEpoch::Next, - ) - .map_err(Error::BeaconStateError)?, - state_root: block.state_root(), - justified_checkpoint: state.current_justified_checkpoint(), - finalized_checkpoint: state.finalized_checkpoint(), - execution_status, - })?; + self.proto_array.process_block::<E>( + ProtoBlock { + slot: block.slot(), + root: block_root, + parent_root: Some(block.parent_root()), + target_root, + current_epoch_shuffling_id: AttestationShufflingId::new( + block_root, + state, + RelativeEpoch::Current, + ) + .map_err(Error::BeaconStateError)?, + next_epoch_shuffling_id: AttestationShufflingId::new( + block_root, + state, + RelativeEpoch::Next, + ) + .map_err(Error::BeaconStateError)?, + state_root: block.state_root(), + justified_checkpoint: state.current_justified_checkpoint(), + finalized_checkpoint: state.finalized_checkpoint(), + execution_status, + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + }, + current_slot, + )?; Ok(()) } + /// Update checkpoints in store if necessary + fn update_checkpoints( + &mut self, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + slots: UpdateJustifiedCheckpointSlots, + spec: &ChainSpec, + ) -> Result<(), Error<T::Error>> { + // Update justified checkpoint. + if justified_checkpoint.epoch > self.fc_store.justified_checkpoint().epoch { + if justified_checkpoint.epoch > self.fc_store.best_justified_checkpoint().epoch { + self.fc_store + .set_best_justified_checkpoint(justified_checkpoint); + } + if self.should_update_justified_checkpoint(justified_checkpoint, slots, spec)? 
{ + self.fc_store + .set_justified_checkpoint(justified_checkpoint) + .map_err(Error::UnableToSetJustifiedCheckpoint)?; + } + } + + // Update finalized checkpoint. + if finalized_checkpoint.epoch > self.fc_store.finalized_checkpoint().epoch { + self.fc_store.set_finalized_checkpoint(finalized_checkpoint); + self.fc_store + .set_justified_checkpoint(justified_checkpoint) + .map_err(Error::UnableToSetJustifiedCheckpoint)?; + } + Ok(()) + } + /// Validates the `epoch` against the current time according to the fork choice store. /// /// ## Specification @@ -860,12 +1084,12 @@ where /// will not be run here. pub fn on_attestation( &mut self, - current_slot: Slot, + system_time_current_slot: Slot, attestation: &IndexedAttestation<E>, is_from_block: AttestationFromBlock, + spec: &ChainSpec, ) -> Result<(), Error<T::Error>> { - // Ensure the store is up-to-date. - self.update_time(current_slot)?; + self.update_time(system_time_current_slot, spec)?; // Ignore any attestations to the zero hash. // @@ -908,14 +1132,34 @@ where Ok(()) } + /// Apply an attester slashing to fork choice. + /// + /// We assume that the attester slashing provided to this function has already been verified. + pub fn on_attester_slashing(&mut self, slashing: &AttesterSlashing<E>) { + let attesting_indices_set = |att: &IndexedAttestation<E>| { + att.attesting_indices + .iter() + .copied() + .collect::<BTreeSet<_>>() + }; + let att1_indices = attesting_indices_set(&slashing.attestation_1); + let att2_indices = attesting_indices_set(&slashing.attestation_2); + self.fc_store + .extend_equivocating_indices(att1_indices.intersection(&att2_indices).copied()); + } + /// Call `on_tick` for all slots between `fc_store.get_current_slot()` and the provided /// `current_slot`. Returns the value of `self.fc_store.get_current_slot`. 
- pub fn update_time(&mut self, current_slot: Slot) -> Result<Slot, Error<T::Error>> { + pub fn update_time( + &mut self, + current_slot: Slot, + spec: &ChainSpec, + ) -> Result<Slot, Error<T::Error>> { while self.fc_store.get_current_slot() < current_slot { let previous_slot = self.fc_store.get_current_slot(); // Note: we are relying upon `on_tick` to update `fc_store.time` to ensure we don't // get stuck in a loop. - on_tick(&mut self.fc_store, previous_slot + 1)? + self.on_tick(previous_slot + 1, spec)? } // Process any attestations that might now be eligible. @@ -924,6 +1168,63 @@ where Ok(self.fc_store.get_current_slot()) } + /// Called whenever the current time increases. + /// + /// ## Specification + /// + /// Equivalent to: + /// + /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick + fn on_tick(&mut self, time: Slot, spec: &ChainSpec) -> Result<(), Error<T::Error>> { + let store = &mut self.fc_store; + let previous_slot = store.get_current_slot(); + + if time > previous_slot + 1 { + return Err(Error::InconsistentOnTick { + previous_slot, + time, + }); + } + + // Update store time. + store.set_current_slot(time); + + let current_slot = store.get_current_slot(); + + // Reset proposer boost if this is a new slot. + if current_slot > previous_slot { + store.set_proposer_boost_root(Hash256::zero()); + } + + // Not a new epoch, return. 
+ if !(current_slot > previous_slot + && compute_slots_since_epoch_start::<E>(current_slot) == 0) + { + return Ok(()); + } + + if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch { + let store = &self.fc_store; + if self.is_descendant_of_finalized(store.best_justified_checkpoint().root) { + let store = &mut self.fc_store; + store + .set_justified_checkpoint(*store.best_justified_checkpoint()) + .map_err(Error::ForkChoiceStoreError)?; + } + } + + // Update store.justified_checkpoint if a better unrealized justified checkpoint is known + let unrealized_justified_checkpoint = *self.fc_store.unrealized_justified_checkpoint(); + let unrealized_finalized_checkpoint = *self.fc_store.unrealized_finalized_checkpoint(); + self.update_checkpoints( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + UpdateJustifiedCheckpointSlots::OnTick { current_slot }, + spec, + )?; + Ok(()) + } + /// Processes and removes from the queue any queued attestations which may now be eligible for /// processing due to the slot clock incrementing. fn process_attestation_queue(&mut self) -> Result<(), Error<T::Error>> { @@ -966,6 +1267,11 @@ where } } + /// Returns the weight for the given block root. + pub fn get_block_weight(&self, block_root: &Hash256) -> Option<u64> { + self.proto_array.get_weight(block_root) + } + /// Returns the `ProtoBlock` for the justified checkpoint. /// /// ## Notes @@ -995,6 +1301,45 @@ where .is_descendant(self.fc_store.finalized_checkpoint().root, block_root) } + /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid. + /// + /// Returns `Ok(false)` if `block_root`'s execution payload has been elected as fully VALID, if + /// it is a pre-Bellatrix block or if it is before the PoW terminal block. + /// + /// In the case where the block could not be found in fork-choice, it returns the + /// `execution_status` of the current finalized block. 
+ /// + /// This function assumes the `block_root` exists. + pub fn is_optimistic_or_invalid_block( + &self, + block_root: &Hash256, + ) -> Result<bool, Error<T::Error>> { + if let Some(status) = self.get_block_execution_status(block_root) { + Ok(status.is_optimistic_or_invalid()) + } else { + Ok(self + .get_finalized_block()? + .execution_status + .is_optimistic_or_invalid()) + } + } + + /// The same as `is_optimistic_block` but does not fallback to `self.get_finalized_block` + /// when the block cannot be found. + /// + /// Intended to be used when checking if the head has been imported optimistically or is + /// invalid. + pub fn is_optimistic_or_invalid_block_no_fallback( + &self, + block_root: &Hash256, + ) -> Result<bool, Error<T::Error>> { + if let Some(status) = self.get_block_execution_status(block_root) { + Ok(status.is_optimistic_or_invalid()) + } else { + Err(Error::MissingProtoArrayBlock(*block_root)) + } + } + /// Returns `Ok(false)` if a block is not viable to be imported optimistically. /// /// ## Notes @@ -1014,19 +1359,8 @@ where return Ok(true); } - // If the justified block has execution enabled, then optimistically import any block. - if self - .get_justified_block()? - .execution_status - .is_execution_enabled() - { - return Ok(true); - } - // If the parent block has execution enabled, always import the block. // - // TODO(bellatrix): this condition has not yet been merged into the spec. - // // See: // // https://github.com/ethereum/consensus-specs/pull/2844 @@ -1063,6 +1397,14 @@ where *self.fc_store.best_justified_checkpoint() } + pub fn unrealized_justified_checkpoint(&self) -> Checkpoint { + *self.fc_store.unrealized_justified_checkpoint() + } + + pub fn unrealized_finalized_checkpoint(&self) -> Checkpoint { + *self.fc_store.unrealized_finalized_checkpoint() + } + /// Returns the latest message for a given validator, if any. /// /// Returns `(block_root, block_slot)`. 
@@ -1080,6 +1422,12 @@ where &self.proto_array } + /// Returns a mutable reference to `proto_array`. + /// Should only be used in testing. + pub fn proto_array_mut(&mut self) -> &mut ProtoArrayForkChoice { + &mut self.proto_array + } + /// Returns a reference to the underlying `fc_store`. pub fn fc_store(&self) -> &T { &self.fc_store @@ -1104,22 +1452,117 @@ where .map_err(Into::into) } + /// Instantiate `Self` from some `PersistedForkChoice` generated by a earlier call to + /// `Self::to_persisted`. + pub fn proto_array_from_persisted( + persisted: &PersistedForkChoice, + reset_payload_statuses: ResetPayloadStatuses, + count_unrealized_full: CountUnrealizedFull, + spec: &ChainSpec, + log: &Logger, + ) -> Result<ProtoArrayForkChoice, Error<T::Error>> { + let mut proto_array = + ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes, count_unrealized_full) + .map_err(Error::InvalidProtoArrayBytes)?; + let contains_invalid_payloads = proto_array.contains_invalid_payloads(); + + debug!( + log, + "Restoring fork choice from persisted"; + "reset_payload_statuses" => ?reset_payload_statuses, + "contains_invalid_payloads" => contains_invalid_payloads, + ); + + // Exit early if there are no "invalid" payloads, if requested. + if matches!( + reset_payload_statuses, + ResetPayloadStatuses::OnlyWithInvalidPayload + ) && !contains_invalid_payloads + { + return Ok(proto_array); + } + + // Reset all blocks back to being "optimistic". This helps recover from an EL consensus + // fault where an invalid payload becomes valid. + if let Err(e) = proto_array.set_all_blocks_to_optimistic::<E>(spec) { + // If there is an error resetting the optimistic status then log loudly and revert + // back to a proto-array which does not have the reset applied. This indicates a + // significant error in Lighthouse and warrants detailed investigation. 
+ crit!( + log, + "Failed to reset payload statuses"; + "error" => e, + "info" => "please report this error", + ); + ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes, count_unrealized_full) + .map_err(Error::InvalidProtoArrayBytes) + } else { + debug!( + log, + "Successfully reset all payload statuses"; + ); + Ok(proto_array) + } + } + /// Instantiate `Self` from some `PersistedForkChoice` generated by a earlier call to /// `Self::to_persisted`. pub fn from_persisted( persisted: PersistedForkChoice, + reset_payload_statuses: ResetPayloadStatuses, fc_store: T, + count_unrealized_full: CountUnrealizedFull, + spec: &ChainSpec, + log: &Logger, ) -> Result<Self, Error<T::Error>> { - let proto_array = ProtoArrayForkChoice::from_bytes(&persisted.proto_array_bytes) - .map_err(Error::InvalidProtoArrayBytes)?; + let proto_array = Self::proto_array_from_persisted( + &persisted, + reset_payload_statuses, + count_unrealized_full, + spec, + log, + )?; - Ok(Self { + let current_slot = fc_store.get_current_slot(); + + let mut fork_choice = Self { fc_store, proto_array, queued_attestations: persisted.queued_attestations, - forkchoice_update_parameters: None, + // Will be updated in the following call to `Self::get_head`. + forkchoice_update_parameters: ForkchoiceUpdateParameters { + head_hash: None, + justified_hash: None, + finalized_hash: None, + head_root: Hash256::zero(), + }, + // Will be updated in the following call to `Self::get_head`. + head_block_root: Hash256::zero(), _phantom: PhantomData, - }) + }; + + // If a call to `get_head` fails, the only known cause is because the only head with viable + // FFG properties is has an invalid payload. In this scenario, set all the payloads back to + // an optimistic status so that we can have a head to start from. 
+ if let Err(e) = fork_choice.get_head(current_slot, spec) { + warn!( + log, + "Could not find head on persisted FC"; + "info" => "resetting all payload statuses and retrying", + "error" => ?e + ); + // Although we may have already made this call whilst loading `proto_array`, try it + // again since we may have mutated the `proto_array` during `get_head` and therefore may + // get a different result. + fork_choice + .proto_array + .set_all_blocks_to_optimistic::<E>(spec)?; + // If the second attempt at finding a head fails, return an error since we do not + // expect this scenario. + fork_choice.get_head(current_slot, spec)?; + } + + Ok(fork_choice) } /// Takes a snapshot of `Self` and stores it in `PersistedForkChoice`, allowing this struct to diff --git a/consensus/fork_choice/src/fork_choice_store.rs b/consensus/fork_choice/src/fork_choice_store.rs index 7826007516..9604e25475 100644 --- a/consensus/fork_choice/src/fork_choice_store.rs +++ b/consensus/fork_choice/src/fork_choice_store.rs @@ -1,4 +1,6 @@ -use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; +use std::collections::BTreeSet; +use std::fmt::Debug; +use types::{BeaconBlockRef, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, Slot}; /// Approximates the `Store` in "Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice": /// @@ -17,7 +19,7 @@ use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecPayload, Hash256, /// concrete struct is to allow this crate to be free from "impure" on-disk database logic, /// hopefully making auditing easier. pub trait ForkChoiceStore<T: EthSpec>: Sized { - type Error; + type Error: Debug; /// Returns the last value passed to `Self::set_current_slot`. fn get_current_slot(&self) -> Slot; @@ -33,7 +35,7 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized { /// choice. Allows the implementer to performing caching or other housekeeping duties. 
fn on_verified_block<Payload: ExecPayload<T>>( &mut self, - block: &BeaconBlock<T, Payload>, + block: BeaconBlockRef<T, Payload>, block_root: Hash256, state: &BeaconState<T>, ) -> Result<(), Self::Error>; @@ -50,6 +52,12 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized { /// Returns the `finalized_checkpoint`. fn finalized_checkpoint(&self) -> &Checkpoint; + /// Returns the `unrealized_justified_checkpoint`. + fn unrealized_justified_checkpoint(&self) -> &Checkpoint; + + /// Returns the `unrealized_finalized_checkpoint`. + fn unrealized_finalized_checkpoint(&self) -> &Checkpoint; + /// Returns the `proposer_boost_root`. fn proposer_boost_root(&self) -> Hash256; @@ -62,6 +70,18 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized { /// Sets the `best_justified_checkpoint`. fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint); + /// Sets the `unrealized_justified_checkpoint`. + fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint); + + /// Sets the `unrealized_finalized_checkpoint`. + fn set_unrealized_finalized_checkpoint(&mut self, checkpoint: Checkpoint); + /// Sets the proposer boost root. fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256); + + /// Gets the equivocating indices. + fn equivocating_indices(&self) -> &BTreeSet<u64>; + + /// Adds to the set of equivocating indices. 
+ fn extend_equivocating_indices(&mut self, indices: impl IntoIterator<Item = u64>); } diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 157306dd5f..b307c66d88 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -2,8 +2,11 @@ mod fork_choice; mod fork_choice_store; pub use crate::fork_choice::{ - AttestationFromBlock, Error, ForkChoice, InvalidAttestation, InvalidBlock, - PayloadVerificationStatus, PersistedForkChoice, QueuedAttestation, + AttestationFromBlock, CountUnrealized, Error, ForkChoice, ForkChoiceView, + ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, + PersistedForkChoice, QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; -pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; +pub use proto_array::{ + Block as ProtoBlock, CountUnrealizedFull, ExecutionStatus, InvalidationOperation, +}; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 3f8a2ac6b6..850f7c4a12 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -12,13 +12,13 @@ use beacon_chain::{ StateSkipConfig, WhenSlotSkipped, }; use fork_choice::{ - ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, + CountUnrealized, ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, + QueuedAttestation, }; use store::MemoryStore; use types::{ - test_utils::generate_deterministic_keypair, BeaconBlock, BeaconBlockRef, BeaconState, - ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, Slot, - SubnetId, + test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, + Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, SignedBeaconBlock, Slot, SubnetId, }; pub type E = MainnetEthSpec; @@ -74,7 +74,14 @@ impl 
ForkChoiceTest { where T: Fn(&BeaconForkChoiceStore<E, MemoryStore<E>, MemoryStore<E>>) -> U, { - func(&self.harness.chain.fork_choice.read().fc_store()) + func( + &self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .fc_store(), + ) } /// Assert the epochs match. @@ -109,15 +116,7 @@ impl ForkChoiceTest { /// Assert the given slot is greater than the head slot. pub fn assert_finalized_epoch_is_less_than(self, epoch: Epoch) -> Self { - assert!( - self.harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint - .epoch - < epoch - ); + assert!(self.harness.finalized_checkpoint().epoch < epoch); self } @@ -150,11 +149,17 @@ impl ForkChoiceTest { { self.harness .chain - .fork_choice - .write() - .update_time(self.harness.chain.slot().unwrap()) + .canonical_head + .fork_choice_write_lock() + .update_time(self.harness.chain.slot().unwrap(), &self.harness.spec) .unwrap(); - func(self.harness.chain.fork_choice.read().queued_attestations()); + func( + self.harness + .chain + .canonical_head + .fork_choice_read_lock() + .queued_attestations(), + ); self } @@ -173,7 +178,7 @@ impl ForkChoiceTest { } /// Build the chain whilst `predicate` returns `true` and `process_block_result` does not error. 
- pub fn apply_blocks_while<F>(self, mut predicate: F) -> Result<Self, Self> + pub async fn apply_blocks_while<F>(self, mut predicate: F) -> Result<Self, Self> where F: FnMut(BeaconBlockRef<'_, E>, &BeaconState<E>) -> bool, { @@ -182,12 +187,12 @@ impl ForkChoiceTest { let validators = self.harness.get_all_validators(); loop { let slot = self.harness.get_current_slot(); - let (block, state_) = self.harness.make_block(state, slot); + let (block, state_) = self.harness.make_block(state, slot).await; state = state_; if !predicate(block.message(), &state) { break; } - if let Ok(block_hash) = self.harness.process_block_result(block.clone()) { + if let Ok(block_hash) = self.harness.process_block_result(block.clone()).await { self.harness.attest_block( &state, block.state_root(), @@ -205,25 +210,29 @@ impl ForkChoiceTest { } /// Apply `count` blocks to the chain (with attestations). - pub fn apply_blocks(self, count: usize) -> Self { + pub async fn apply_blocks(self, count: usize) -> Self { self.harness.advance_slot(); - self.harness.extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + self.harness + .extend_chain( + count, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; self } /// Apply `count` blocks to the chain (without attestations). - pub fn apply_blocks_without_new_attestations(self, count: usize) -> Self { + pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { self.harness.advance_slot(); - self.harness.extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + self.harness + .extend_chain( + count, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; self } @@ -256,9 +265,9 @@ impl ForkChoiceTest { /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. 
- pub fn apply_block_directly_to_fork_choice<F>(self, mut func: F) -> Self + pub async fn apply_block_directly_to_fork_choice<F>(self, mut func: F) -> Self where - F: FnMut(&mut BeaconBlock<E>, &mut BeaconState<E>), + F: FnMut(&mut SignedBeaconBlock<E>, &mut BeaconState<E>), { let state = self .harness @@ -269,22 +278,22 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (signed_block, mut state) = self.harness.make_block(state, slot); - let (mut block, _) = signed_block.deconstruct(); - func(&mut block, &mut state); + let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; + func(&mut signed_block, &mut state); let current_slot = self.harness.get_current_slot(); self.harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_block( current_slot, - &block, - block.canonical_root(), + signed_block.message(), + signed_block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, &self.harness.chain.spec, + CountUnrealized::True, ) .unwrap(); self @@ -293,13 +302,13 @@ impl ForkChoiceTest { /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts that an error occurred and allows inspecting it via `comparison_func`. 
- pub fn apply_invalid_block_directly_to_fork_choice<F, G>( + pub async fn apply_invalid_block_directly_to_fork_choice<F, G>( self, mut mutation_func: F, mut comparison_func: G, ) -> Self where - F: FnMut(&mut BeaconBlock<E>, &mut BeaconState<E>), + F: FnMut(&mut SignedBeaconBlock<E>, &mut BeaconState<E>), G: FnMut(ForkChoiceError), { let state = self @@ -311,23 +320,23 @@ impl ForkChoiceTest { ) .unwrap(); let slot = self.harness.get_current_slot(); - let (signed_block, mut state) = self.harness.make_block(state, slot); - let (mut block, _) = signed_block.deconstruct(); - mutation_func(&mut block, &mut state); + let (mut signed_block, mut state) = self.harness.make_block(state, slot).await; + mutation_func(&mut signed_block, &mut state); let current_slot = self.harness.get_current_slot(); let err = self .harness .chain - .fork_choice - .write() + .canonical_head + .fork_choice_write_lock() .on_block( current_slot, - &block, - block.canonical_root(), + signed_block.message(), + signed_block.canonical_root(), Duration::from_secs(0), &state, PayloadVerificationStatus::Verified, &self.harness.chain.spec, + CountUnrealized::True, ) .err() .expect("on_block did not return an error"); @@ -339,7 +348,7 @@ impl ForkChoiceTest { /// database. fn check_justified_balances(&self) { let harness = &self.harness; - let fc = self.harness.chain.fork_choice.read(); + let fc = self.harness.chain.canonical_head.fork_choice_read_lock(); let state_root = harness .chain @@ -377,7 +386,7 @@ impl ForkChoiceTest { /// Returns an attestation that is valid for some slot in the given `chain`. /// /// Also returns some info about who created it. 
- fn apply_attestation_to_chain<F, G>( + async fn apply_attestation_to_chain<F, G>( self, delay: MutationDelay, mut mutation_func: F, @@ -387,7 +396,7 @@ impl ForkChoiceTest { F: FnMut(&mut IndexedAttestation<E>, &BeaconChain<EphemeralHarnessType<E>>), G: FnMut(Result<(), BeaconChainError>), { - let head = self.harness.chain.head().expect("should get head"); + let head = self.harness.chain.head_snapshot(); let current_slot = self.harness.chain.slot().expect("should get slot"); let mut attestation = self @@ -438,11 +447,13 @@ impl ForkChoiceTest { if let MutationDelay::Blocks(slots) = delay { self.harness.advance_slot(); - self.harness.extend_chain( - slots, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::SomeValidators(vec![]), - ); + self.harness + .extend_chain( + slots, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; } mutation_func( @@ -464,17 +475,9 @@ impl ForkChoiceTest { pub fn check_finalized_block_is_accessible(self) -> Self { self.harness .chain - .fork_choice - .write() - .get_block( - &self - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint - .root, - ) + .canonical_head + .fork_choice_read_lock() + .get_block(&self.harness.finalized_checkpoint().root) .unwrap(); self @@ -488,7 +491,7 @@ fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { #[test] fn justified_and_finalized_blocks() { let tester = ForkChoiceTest::new(); - let fork_choice = tester.harness.chain.fork_choice.read(); + let fork_choice = tester.harness.chain.canonical_head.fork_choice_read_lock(); let justified_checkpoint = fork_choice.justified_checkpoint(); assert_eq!(justified_checkpoint.epoch, 0); @@ -503,44 +506,50 @@ fn justified_and_finalized_blocks() { /// - The new justified checkpoint descends from the current. 
/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -#[test] -fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { +#[tokio::test] +async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .move_inside_safe_to_update() .assert_justified_epoch(0) .apply_blocks(1) + .await .assert_justified_epoch(2); } /// - The new justified checkpoint descends from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is **not** the first justification since genesis -#[test] -fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { +#[tokio::test] +async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2) + .await .unwrap() .move_outside_safe_to_update() .assert_justified_epoch(2) .assert_best_justified_epoch(2) .apply_blocks(1) + .await .assert_justified_epoch(3); } /// - The new justified checkpoint descends from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is the first justification since genesis -#[test] -fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { +#[tokio::test] +async fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .move_to_next_unsafe_period() .assert_justified_epoch(0) .assert_best_justified_epoch(0) .apply_blocks(1) + .await .assert_justified_epoch(2) .assert_best_justified_epoch(2); } @@ -548,12 +557,14 @@ fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { /// - The new justified checkpoint **does not** descend from the current. 
/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has **not** increased. -#[test] -fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_inside_safe_to_update() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -567,6 +578,7 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(3) .assert_best_justified_epoch(3); } @@ -574,12 +586,14 @@ fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_fi /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`. /// - Finalized epoch has **not** increased. 
-#[test] -fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -593,6 +607,7 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(2) .assert_best_justified_epoch(3); } @@ -600,12 +615,14 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_f /// - The new justified checkpoint **does not** descend from the current. /// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has increased. -#[test] -fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { +#[tokio::test] +async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .move_to_next_unsafe_period() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { @@ -619,17 +636,20 @@ fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_fina .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); }) + .await .assert_justified_epoch(3) .assert_best_justified_epoch(3); } /// Check that the balances are obtained correctly. 
-#[test] -fn justified_balances() { +#[tokio::test] +async fn justified_balances() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_justified_epoch(2) .check_justified_balances() } @@ -648,15 +668,16 @@ macro_rules! assert_invalid_block { /// Specification v0.12.1 /// /// assert block.parent_root in store.block_states -#[test] -fn invalid_block_unknown_parent() { +#[tokio::test] +async fn invalid_block_unknown_parent() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks(2) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.parent_root_mut() = junk; + *block.message_mut().parent_root_mut() = junk; }, |err| { assert_invalid_block!( @@ -665,36 +686,42 @@ fn invalid_block_unknown_parent() { if parent == junk ) }, - ); + ) + .await; } /// Specification v0.12.1 /// /// assert get_current_slot(store) >= block.slot -#[test] -fn invalid_block_future_slot() { +#[tokio::test] +async fn invalid_block_future_slot() { ForkChoiceTest::new() .apply_blocks(2) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.slot_mut() += 1; + *block.message_mut().slot_mut() += 1; }, |err| assert_invalid_block!(err, InvalidBlock::FutureSlot { .. 
}), - ); + ) + .await; } /// Specification v0.12.1 /// /// assert block.slot > finalized_slot -#[test] -fn invalid_block_finalized_slot() { +#[tokio::test] +async fn invalid_block_finalized_slot() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .apply_invalid_block_directly_to_fork_choice( |block, _| { - *block.slot_mut() = Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; + *block.message_mut().slot_mut() = + Epoch::new(2).start_slot(E::slots_per_epoch()) - 1; }, |err| { assert_invalid_block!( @@ -703,7 +730,8 @@ fn invalid_block_finalized_slot() { if finalized_slot == Epoch::new(2).start_slot(E::slots_per_epoch()) ) }, - ); + ) + .await; } /// Specification v0.12.1 @@ -714,18 +742,20 @@ fn invalid_block_finalized_slot() { /// Note: we technically don't do this exact check, but an equivalent check. Reference: /// /// https://github.com/ethereum/eth2.0-specs/pull/1884 -#[test] -fn invalid_block_finalized_descendant() { +#[tokio::test] +async fn invalid_block_finalized_descendant() { let invalid_ancestor = Mutex::new(Hash256::zero()); ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2) .apply_invalid_block_directly_to_fork_choice( |block, state| { - *block.parent_root_mut() = *state + *block.message_mut().parent_root_mut() = *state .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) .unwrap(); *invalid_ancestor.lock().unwrap() = block.parent_root(); @@ -737,7 +767,8 @@ fn invalid_block_finalized_descendant() { if block_ancestor == Some(*invalid_ancestor.lock().unwrap()) ) }, - ); + ) + .await; } macro_rules! assert_invalid_attestation { @@ -754,23 +785,26 @@ macro_rules! assert_invalid_attestation { } /// Ensure we can process a valid attestation. 
-#[test] -fn valid_attestation() { +#[tokio::test] +async fn valid_attestation() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, |result| assert_eq!(result.unwrap(), ()), - ); + ) + .await; } /// This test is not in the specification, however we reject an attestation with an empty /// aggregation bitfield since it has no purpose beyond wasting our time. -#[test] -fn invalid_attestation_empty_bitfield() { +#[tokio::test] +async fn invalid_attestation_empty_bitfield() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -779,7 +813,8 @@ fn invalid_attestation_empty_bitfield() { |result| { assert_invalid_attestation!(result, InvalidAttestation::EmptyAggregationBitfield) }, - ); + ) + .await; } /// Specification v0.12.1: @@ -787,10 +822,11 @@ fn invalid_attestation_empty_bitfield() { /// assert target.epoch in [expected_current_epoch, previous_epoch] /// /// (tests epoch after current epoch) -#[test] -fn invalid_attestation_future_epoch() { +#[tokio::test] +async fn invalid_attestation_future_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -803,7 +839,8 @@ fn invalid_attestation_future_epoch() { if attestation_epoch == Epoch::new(2) && current_epoch == Epoch::new(0) ) }, - ); + ) + .await; } /// Specification v0.12.1: @@ -811,10 +848,11 @@ fn invalid_attestation_future_epoch() { /// assert target.epoch in [expected_current_epoch, previous_epoch] /// /// (tests epoch prior to previous epoch) -#[test] -fn invalid_attestation_past_epoch() { +#[tokio::test] +async fn invalid_attestation_past_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(E::slots_per_epoch() as usize * 3 + 1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { 
@@ -827,16 +865,18 @@ fn invalid_attestation_past_epoch() { if attestation_epoch == Epoch::new(0) && current_epoch == Epoch::new(3) ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.epoch == compute_epoch_at_slot(attestation.data.slot) -#[test] -fn invalid_attestation_target_epoch() { +#[tokio::test] +async fn invalid_attestation_target_epoch() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(E::slots_per_epoch() as usize + 1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -849,18 +889,20 @@ fn invalid_attestation_target_epoch() { if target == Epoch::new(1) && slot == Slot::new(1) ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.root in store.blocks -#[test] -fn invalid_attestation_unknown_target_root() { +#[tokio::test] +async fn invalid_attestation_unknown_target_root() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -873,18 +915,20 @@ fn invalid_attestation_unknown_target_root() { if root == junk ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert attestation.data.beacon_block_root in store.blocks -#[test] -fn invalid_attestation_unknown_beacon_block_root() { +#[tokio::test] +async fn invalid_attestation_unknown_beacon_block_root() { let junk = Hash256::from_low_u64_be(42); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, _| { @@ -897,16 +941,18 @@ fn invalid_attestation_unknown_beacon_block_root() { if beacon_block_root == junk ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot -#[test] -fn invalid_attestation_future_block() { +#[tokio::test] +async fn invalid_attestation_future_block() { ForkChoiceTest::new() 
.apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::Blocks(1), |attestation, chain| { @@ -923,19 +969,21 @@ fn invalid_attestation_future_block() { if block == 2 && attestation == 1 ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot) -#[test] -fn invalid_attestation_inconsistent_ffg_vote() { +#[tokio::test] +async fn invalid_attestation_inconsistent_ffg_vote() { let local_opt = Mutex::new(None); let attestation_opt = Mutex::new(None); ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .apply_attestation_to_chain( MutationDelay::NoDelay, |attestation, chain| { @@ -962,22 +1010,25 @@ fn invalid_attestation_inconsistent_ffg_vote() { && local == local_opt.lock().unwrap().unwrap() ) }, - ); + ) + .await; } /// Specification v0.12.1: /// /// assert get_current_slot(store) >= attestation.data.slot + 1 -#[test] -fn invalid_attestation_delayed_slot() { +#[tokio::test] +async fn invalid_attestation_delayed_slot() { ForkChoiceTest::new() .apply_blocks_without_new_attestations(1) + .await .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0)) .apply_attestation_to_chain( MutationDelay::NoDelay, |_, _| {}, |result| assert_eq!(result.unwrap(), ()), ) + .await .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 1)) .skip_slot() .inspect_queued_attestations(|queue| assert_eq!(queue.len(), 0)); @@ -985,10 +1036,11 @@ fn invalid_attestation_delayed_slot() { /// Tests that the correct target root is used when the attested-to block is in a prior epoch to /// the attestation. 
-#[test] -fn valid_attestation_skip_across_epoch() { +#[tokio::test] +async fn valid_attestation_skip_across_epoch() { ForkChoiceTest::new() .apply_blocks(E::slots_per_epoch() as usize - 1) + .await .skip_slots(2) .apply_attestation_to_chain( MutationDelay::NoDelay, @@ -999,15 +1051,18 @@ fn valid_attestation_skip_across_epoch() { ) }, |result| result.unwrap(), - ); + ) + .await; } -#[test] -fn can_read_finalized_block() { +#[tokio::test] +async fn can_read_finalized_block() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .check_finalized_block_is_accessible(); } @@ -1025,8 +1080,8 @@ fn weak_subjectivity_fail_on_startup() { ForkChoiceTest::new_with_chain_config(chain_config); } -#[test] -fn weak_subjectivity_pass_on_startup() { +#[tokio::test] +async fn weak_subjectivity_pass_on_startup() { let epoch = Epoch::new(0); let root = Hash256::zero(); @@ -1037,23 +1092,21 @@ fn weak_subjectivity_pass_on_startup() { ForkChoiceTest::new_with_chain_config(chain_config) .apply_blocks(E::slots_per_epoch() as usize) + .await .assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_passes() { +#[tokio::test] +async fn weak_subjectivity_check_passes() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let checkpoint = setup_harness.harness.finalized_checkpoint(); let chain_config = ChainConfig { weak_subjectivity_checkpoint: Some(checkpoint), @@ -1062,26 +1115,25 @@ fn weak_subjectivity_check_passes() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2) 
.assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_fails_early_epoch() { +#[tokio::test] +async fn weak_subjectivity_check_fails_early_epoch() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.epoch = checkpoint.epoch - 1; @@ -1092,25 +1144,23 @@ fn weak_subjectivity_check_fails_early_epoch() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_fails_late_epoch() { +#[tokio::test] +async fn weak_subjectivity_check_fails_late_epoch() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.epoch = checkpoint.epoch + 1; @@ -1121,25 +1171,23 @@ fn weak_subjectivity_check_fails_late_epoch() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 4) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_fails_incorrect_root() { +#[tokio::test] +async fn weak_subjectivity_check_fails_incorrect_root() { let setup_harness = ForkChoiceTest::new() .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + 
.await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(2); - let mut checkpoint = setup_harness - .harness - .chain - .head_info() - .unwrap() - .finalized_checkpoint; + let mut checkpoint = setup_harness.harness.finalized_checkpoint(); checkpoint.root = Hash256::zero(); @@ -1150,27 +1198,31 @@ fn weak_subjectivity_check_fails_incorrect_root() { ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 3) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); } -#[test] -fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { +#[tokio::test] +async fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { let setup_harness = ForkChoiceTest::new() // first two epochs .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap(); // get the head, it will become the finalized root of epoch 4 - let checkpoint_root = setup_harness.harness.chain.head_info().unwrap().block_root; + let checkpoint_root = setup_harness.harness.head_block_root(); setup_harness // epoch 3 will be entirely skip slots .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5); // the checkpoint at epoch 4 should become the root of last block of epoch 2 @@ -1187,31 +1239,37 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot() { // recreate the chain exactly ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5) .assert_shutdown_signal_not_sent(); } -#[test] -fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() 
{ +#[tokio::test] +async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { let setup_harness = ForkChoiceTest::new() // first two epochs .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap(); // get the head, it will become the finalized root of epoch 4 - let checkpoint_root = setup_harness.harness.chain.head_info().unwrap().block_root; + let checkpoint_root = setup_harness.harness.head_block_root(); setup_harness // epoch 3 will be entirely skip slots .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await .unwrap() .apply_blocks(1) + .await .assert_finalized_epoch(5); // Invalid checkpoint (epoch too early) @@ -1228,9 +1286,11 @@ fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() { // recreate the chain exactly ForkChoiceTest::new_with_chain_config(chain_config.clone()) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await .unwrap() .skip_slots(E::slots_per_epoch() as usize) .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 6) + .await .unwrap_err() .assert_finalized_epoch_is_less_than(checkpoint.epoch) .assert_shutdown_signal_sent(); diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 79b4cb2d80..826bf6c3a7 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -1,4 +1,4 @@ -use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256}; +use types::{Checkpoint, Epoch, ExecutionBlockHash, Hash256, Slot}; #[derive(Clone, PartialEq, Debug)] pub enum Error { @@ -52,6 +52,7 @@ pub enum Error { #[derive(Clone, PartialEq, Debug)] pub struct InvalidBestNodeInfo { + pub current_slot: Slot, pub start_root: Hash256, pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 
2980c019e8..ba6f3170dc 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -3,9 +3,11 @@ mod ffg_updates; mod no_votes; mod votes; +use crate::proto_array::CountUnrealizedFull; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::InvalidationOperation; use serde_derive::{Deserialize, Serialize}; +use std::collections::BTreeSet; use types::{ AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, Slot, @@ -78,7 +80,7 @@ impl ForkChoiceTestDefinition { let junk_shuffling_id = AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); - let mut fork_choice = ProtoArrayForkChoice::new( + let mut fork_choice = ProtoArrayForkChoice::new::<MainnetEthSpec>( self.finalized_block_slot, Hash256::zero(), self.justified_checkpoint, @@ -86,8 +88,10 @@ impl ForkChoiceTestDefinition { junk_shuffling_id.clone(), junk_shuffling_id, ExecutionStatus::Optimistic(ExecutionBlockHash::zero()), + CountUnrealizedFull::default(), ) .expect("should create fork choice struct"); + let equivocating_indices = BTreeSet::new(); for (op_index, op) in self.operations.into_iter().enumerate() { match op.clone() { @@ -103,9 +107,10 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), + &equivocating_indices, + Slot::new(0), &spec, ) - .map_err(|e| e) .unwrap_or_else(|e| { panic!("find_head op at index {} returned error {}", op_index, e) }); @@ -130,9 +135,10 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, proposer_boost_root, + &equivocating_indices, + Slot::new(0), &spec, ) - .map_err(|e| e) .unwrap_or_else(|e| { panic!("find_head op at index {} returned error {}", op_index, e) }); @@ -154,6 +160,8 @@ impl ForkChoiceTestDefinition { finalized_checkpoint, &justified_state_balances, Hash256::zero(), + &equivocating_indices, + Slot::new(0), 
&spec, ); @@ -192,13 +200,17 @@ impl ForkChoiceTestDefinition { execution_status: ExecutionStatus::Optimistic( ExecutionBlockHash::from_root(root), ), + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, }; - fork_choice.process_block(block).unwrap_or_else(|e| { - panic!( - "process_block op at index {} returned error: {:?}", - op_index, e - ) - }); + fork_choice + .process_block::<MainnetEthSpec>(block, slot) + .unwrap_or_else(|e| { + panic!( + "process_block op at index {} returned error: {:?}", + op_index, e + ) + }); check_bytes_round_trip(&fork_choice); } Operation::ProcessAttestation { @@ -286,8 +298,8 @@ fn get_checkpoint(i: u64) -> Checkpoint { fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { let bytes = original.as_bytes(); - let decoded = - ProtoArrayForkChoice::from_bytes(&bytes).expect("fork choice should decode from bytes"); + let decoded = ProtoArrayForkChoice::from_bytes(&bytes, CountUnrealizedFull::default()) + .expect("fork choice should decode from bytes"); assert!( *original == decoded, "fork choice should encode and decode without change" diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index d6f614b7c3..e7bd9c0ed5 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -4,7 +4,7 @@ mod proto_array; mod proto_array_fork_choice; mod ssz_container; -pub use crate::proto_array::InvalidationOperation; +pub use crate::proto_array::{CountUnrealizedFull, InvalidationOperation}; pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; pub use error::Error; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 3f7909553b..590407d7eb 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -16,6 +16,7 @@ four_byte_option_impl!(four_byte_option_usize, usize); four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); /// 
Defines an operation which may invalidate the `execution_status` of some nodes. +#[derive(Clone, Debug)] pub enum InvalidationOperation { /// Invalidate only `block_root` and it's descendants. Don't invalidate any ancestors. InvalidateOne { block_root: Hash256 }, @@ -96,6 +97,10 @@ pub struct ProtoNode { /// Indicates if an execution node has marked this block as valid. Also contains the execution /// block hash. pub execution_status: ExecutionStatus, + #[ssz(with = "four_byte_option_checkpoint")] + pub unrealized_justified_checkpoint: Option<Checkpoint>, + #[ssz(with = "four_byte_option_checkpoint")] + pub unrealized_finalized_checkpoint: Option<Checkpoint>, } #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] @@ -113,6 +118,24 @@ impl Default for ProposerBoost { } } +/// Indicate whether we should strictly count unrealized justification/finalization votes. +#[derive(Default, PartialEq, Eq, Debug, Serialize, Deserialize, Copy, Clone)] +pub enum CountUnrealizedFull { + True, + #[default] + False, +} + +impl From<bool> for CountUnrealizedFull { + fn from(b: bool) -> Self { + if b { + CountUnrealizedFull::True + } else { + CountUnrealizedFull::False + } + } +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes @@ -123,6 +146,7 @@ pub struct ProtoArray { pub nodes: Vec<ProtoNode>, pub indices: HashMap<Hash256, usize>, pub previous_proposer_boost: ProposerBoost, + pub count_unrealized_full: CountUnrealizedFull, } impl ProtoArray { @@ -139,6 +163,7 @@ impl ProtoArray { /// - Compare the current node with the parents best-child, updating it if the current node /// should become the best child. /// - If required, update the parents best-descendant with the current node or its best-descendant. 
+ #[allow(clippy::too_many_arguments)] pub fn apply_score_changes<E: EthSpec>( &mut self, mut deltas: Vec<i64>, @@ -146,6 +171,7 @@ impl ProtoArray { finalized_checkpoint: Checkpoint, new_balances: &[u64], proposer_boost_root: Hash256, + current_slot: Slot, spec: &ChainSpec, ) -> Result<(), Error> { if deltas.len() != self.indices.len() { @@ -240,7 +266,7 @@ impl ProtoArray { // not exist. node.weight = node .weight - .checked_sub(node_delta.abs() as u64) + .checked_sub(node_delta.unsigned_abs()) .ok_or(Error::DeltaOverflow(node_index))?; } else { node.weight = node @@ -279,7 +305,11 @@ impl ProtoArray { // If the node has a parent, try to update its best-child and best-descendant. if let Some(parent_index) = node.parent { - self.maybe_update_best_child_and_descendant(parent_index, node_index)?; + self.maybe_update_best_child_and_descendant::<E>( + parent_index, + node_index, + current_slot, + )?; } } @@ -289,7 +319,7 @@ impl ProtoArray { /// Register a block with the fork choice. /// /// It is only sane to supply a `None` parent for the genesis block. - pub fn on_block(&mut self, block: Block) -> Result<(), Error> { + pub fn on_block<E: EthSpec>(&mut self, block: Block, current_slot: Slot) -> Result<(), Error> { // If the block is already known, simply ignore it. 
if self.indices.contains_key(&block.root) { return Ok(()); @@ -313,6 +343,8 @@ impl ProtoArray { best_child: None, best_descendant: None, execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, }; // If the parent has an invalid execution status, return an error before adding the block to @@ -334,7 +366,11 @@ impl ProtoArray { self.nodes.push(node.clone()); if let Some(parent_index) = node.parent { - self.maybe_update_best_child_and_descendant(parent_index, node_index)?; + self.maybe_update_best_child_and_descendant::<E>( + parent_index, + node_index, + current_slot, + )?; if matches!(block.execution_status, ExecutionStatus::Valid(_)) { self.propagate_execution_payload_validation_by_index(parent_index)?; @@ -490,9 +526,6 @@ impl ProtoArray { node.best_descendant = None } - // It might be new knowledge that this block is valid, ensure that it and all - // ancestors are marked as valid. - self.propagate_execution_payload_validation_by_index(index)?; break; } } @@ -606,7 +639,11 @@ impl ProtoArray { /// been called without a subsequent `Self::apply_score_changes` call. This is because /// `on_new_block` does not attempt to walk backwards through the tree and update the /// best-child/best-descendant links. - pub fn find_head(&self, justified_root: &Hash256) -> Result<Hash256, Error> { + pub fn find_head<E: EthSpec>( + &self, + justified_root: &Hash256, + current_slot: Slot, + ) -> Result<Hash256, Error> { let justified_index = self .indices .get(justified_root) @@ -639,8 +676,9 @@ impl ProtoArray { .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; // Perform a sanity check that the node is indeed valid to be the head. 
- if !self.node_is_viable_for_head(best_node) { + if !self.node_is_viable_for_head::<E>(best_node, current_slot) { return Err(Error::InvalidBestNode(Box::new(InvalidBestNodeInfo { + current_slot, start_root: *justified_root, justified_checkpoint: self.justified_checkpoint, finalized_checkpoint: self.finalized_checkpoint, @@ -735,10 +773,11 @@ impl ProtoArray { /// best-descendant. /// - The child is not the best child but becomes the best child. /// - The child is not the best child and does not become the best child. - fn maybe_update_best_child_and_descendant( + fn maybe_update_best_child_and_descendant<E: EthSpec>( &mut self, parent_index: usize, child_index: usize, + current_slot: Slot, ) -> Result<(), Error> { let child = self .nodes @@ -750,7 +789,8 @@ impl ProtoArray { .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - let child_leads_to_viable_head = self.node_leads_to_viable_head(child)?; + let child_leads_to_viable_head = + self.node_leads_to_viable_head::<E>(child, current_slot)?; // These three variables are aliases to the three options that we may set the // `parent.best_child` and `parent.best_descendant` to. @@ -763,54 +803,54 @@ impl ProtoArray { ); let no_change = (parent.best_child, parent.best_descendant); - let (new_best_child, new_best_descendant) = if let Some(best_child_index) = - parent.best_child - { - if best_child_index == child_index && !child_leads_to_viable_head { - // If the child is already the best-child of the parent but it's not viable for - // the head, remove it. - change_to_none - } else if best_child_index == child_index { - // If the child is the best-child already, set it again to ensure that the - // best-descendant of the parent is updated. 
- change_to_child - } else { - let best_child = self - .nodes - .get(best_child_index) - .ok_or(Error::InvalidBestDescendant(best_child_index))?; - - let best_child_leads_to_viable_head = self.node_leads_to_viable_head(best_child)?; - - if child_leads_to_viable_head && !best_child_leads_to_viable_head { - // The child leads to a viable head, but the current best-child doesn't. + let (new_best_child, new_best_descendant) = + if let Some(best_child_index) = parent.best_child { + if best_child_index == child_index && !child_leads_to_viable_head { + // If the child is already the best-child of the parent but it's not viable for + // the head, remove it. + change_to_none + } else if best_child_index == child_index { + // If the child is the best-child already, set it again to ensure that the + // best-descendant of the parent is updated. change_to_child - } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { - // The best child leads to a viable head, but the child doesn't. - no_change - } else if child.weight == best_child.weight { - // Tie-breaker of equal weights by root. - if child.root >= best_child.root { - change_to_child - } else { - no_change - } } else { - // Choose the winner by weight. - if child.weight >= best_child.weight { + let best_child = self + .nodes + .get(best_child_index) + .ok_or(Error::InvalidBestDescendant(best_child_index))?; + + let best_child_leads_to_viable_head = + self.node_leads_to_viable_head::<E>(best_child, current_slot)?; + + if child_leads_to_viable_head && !best_child_leads_to_viable_head { + // The child leads to a viable head, but the current best-child doesn't. change_to_child - } else { + } else if !child_leads_to_viable_head && best_child_leads_to_viable_head { + // The best child leads to a viable head, but the child doesn't. no_change + } else if child.weight == best_child.weight { + // Tie-breaker of equal weights by root. 
+ if child.root >= best_child.root { + change_to_child + } else { + no_change + } + } else { + // Choose the winner by weight. + if child.weight >= best_child.weight { + change_to_child + } else { + no_change + } } } - } - } else if child_leads_to_viable_head { - // There is no current best-child and the child is viable. - change_to_child - } else { - // There is no current best-child but the child is not viable. - no_change - }; + } else if child_leads_to_viable_head { + // There is no current best-child and the child is viable. + change_to_child + } else { + // There is no current best-child but the child is not viable. + no_change + }; let parent = self .nodes @@ -825,7 +865,11 @@ impl ProtoArray { /// Indicates if the node itself is viable for the head, or if it's best descendant is viable /// for the head. - fn node_leads_to_viable_head(&self, node: &ProtoNode) -> Result<bool, Error> { + fn node_leads_to_viable_head<E: EthSpec>( + &self, + node: &ProtoNode, + current_slot: Slot, + ) -> Result<bool, Error> { let best_descendant_is_viable_for_head = if let Some(best_descendant_index) = node.best_descendant { let best_descendant = self @@ -833,12 +877,13 @@ impl ProtoArray { .get(best_descendant_index) .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; - self.node_is_viable_for_head(best_descendant) + self.node_is_viable_for_head::<E>(best_descendant, current_slot) } else { false }; - Ok(best_descendant_is_viable_for_head || self.node_is_viable_for_head(node)) + Ok(best_descendant_is_viable_for_head + || self.node_is_viable_for_head::<E>(node, current_slot)) } /// This is the equivalent to the `filter_block_tree` function in the eth2 spec: @@ -847,18 +892,58 @@ impl ProtoArray { /// /// Any node that has a different finalized or justified epoch should not be viable for the /// head. 
- fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool { + fn node_is_viable_for_head<E: EthSpec>(&self, node: &ProtoNode, current_slot: Slot) -> bool { if node.execution_status.is_invalid() { return false; } - if let (Some(node_justified_checkpoint), Some(node_finalized_checkpoint)) = + let genesis_epoch = Epoch::new(0); + + let checkpoint_match_predicate = + |node_justified_checkpoint: Checkpoint, node_finalized_checkpoint: Checkpoint| { + let correct_justified = node_justified_checkpoint == self.justified_checkpoint + || self.justified_checkpoint.epoch == genesis_epoch; + let correct_finalized = node_finalized_checkpoint == self.finalized_checkpoint + || self.finalized_checkpoint.epoch == genesis_epoch; + correct_justified && correct_finalized + }; + + if let ( + Some(unrealized_justified_checkpoint), + Some(unrealized_finalized_checkpoint), + Some(justified_checkpoint), + Some(finalized_checkpoint), + ) = ( + node.unrealized_justified_checkpoint, + node.unrealized_finalized_checkpoint, + node.justified_checkpoint, + node.finalized_checkpoint, + ) { + let current_epoch = current_slot.epoch(E::slots_per_epoch()); + + // If previous epoch is justified, pull up all tips to at least the previous epoch + if CountUnrealizedFull::True == self.count_unrealized_full + && (current_epoch > genesis_epoch + && self.justified_checkpoint.epoch + 1 == current_epoch) + { + unrealized_justified_checkpoint.epoch + 1 >= current_epoch + // If previous epoch is not justified, pull up only tips from past epochs up to the current epoch + } else { + // If block is from a previous epoch, filter using unrealized justification & finalization information + if node.slot.epoch(E::slots_per_epoch()) < current_epoch { + checkpoint_match_predicate( + unrealized_justified_checkpoint, + unrealized_finalized_checkpoint, + ) + // If block is from the current epoch, filter using the head state's justification & finalization information + } else { + 
checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) + } + } + } else if let (Some(justified_checkpoint), Some(finalized_checkpoint)) = (node.justified_checkpoint, node.finalized_checkpoint) { - (node_justified_checkpoint == self.justified_checkpoint - || self.justified_checkpoint.epoch == Epoch::new(0)) - && (node_finalized_checkpoint == self.finalized_checkpoint - || self.finalized_checkpoint.epoch == Epoch::new(0)) + checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint) } else { false } @@ -930,7 +1015,7 @@ impl ProtoArray { /// Returns `None` if there is an overflow or underflow when calculating the score. /// /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#get_latest_attesting_balance -fn calculate_proposer_boost<E: EthSpec>( +pub fn calculate_proposer_boost<E: EthSpec>( validator_balances: &[u64], proposer_score_boost: u64, ) -> Option<u64> { diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 88bf7840c2..8f5d062ec6 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -1,10 +1,13 @@ use crate::error::Error; -use crate::proto_array::{InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode}; +use crate::proto_array::CountUnrealizedFull; +use crate::proto_array::{ + calculate_proposer_boost, InvalidationOperation, Iter, ProposerBoost, ProtoArray, ProtoNode, +}; use crate::ssz_container::SszContainer; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; +use std::collections::{BTreeSet, HashMap}; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -36,7 +39,7 @@ pub enum ExecutionStatus { /// /// This `bool` only exists to satisfy our SSZ implementation which requires all variants /// to have 
a value. It can be set to anything. - Irrelevant(bool), // TODO(merge): fix bool. + Irrelevant(bool), } impl ExecutionStatus { @@ -87,10 +90,22 @@ impl ExecutionStatus { /// /// - Has execution enabled, AND /// - Has a payload that has not yet been verified by an EL. - pub fn is_optimistic(&self) -> bool { + pub fn is_strictly_optimistic(&self) -> bool { matches!(self, ExecutionStatus::Optimistic(_)) } + /// Returns `true` if the block: + /// + /// - Has execution enabled, AND + /// - Has a payload that has not yet been verified by an EL, OR. + /// - Has a payload that has been deemed invalid by an EL. + pub fn is_optimistic_or_invalid(&self) -> bool { + matches!( + self, + ExecutionStatus::Optimistic(_) | ExecutionStatus::Invalid(_) + ) + } + /// Returns `true` if the block: /// /// - Has execution enabled, AND @@ -124,6 +139,8 @@ pub struct Block { /// Indicates if an execution node has marked this block as valid. Also contains the execution /// block hash. pub execution_status: ExecutionStatus, + pub unrealized_justified_checkpoint: Option<Checkpoint>, + pub unrealized_finalized_checkpoint: Option<Checkpoint>, } /// A Vec-wrapper which will grow to match any request. 
@@ -162,7 +179,7 @@ pub struct ProtoArrayForkChoice { impl ProtoArrayForkChoice { #[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new<E: EthSpec>( finalized_block_slot: Slot, finalized_block_state_root: Hash256, justified_checkpoint: Checkpoint, @@ -170,6 +187,7 @@ impl ProtoArrayForkChoice { current_epoch_shuffling_id: AttestationShufflingId, next_epoch_shuffling_id: AttestationShufflingId, execution_status: ExecutionStatus, + count_unrealized_full: CountUnrealizedFull, ) -> Result<Self, String> { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, @@ -178,6 +196,7 @@ impl ProtoArrayForkChoice { nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), previous_proposer_boost: ProposerBoost::default(), + count_unrealized_full, }; let block = Block { @@ -193,10 +212,12 @@ impl ProtoArrayForkChoice { justified_checkpoint, finalized_checkpoint, execution_status, + unrealized_justified_checkpoint: Some(justified_checkpoint), + unrealized_finalized_checkpoint: Some(finalized_checkpoint), }; proto_array - .on_block(block) + .on_block::<E>(block, finalized_block_slot) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; Ok(Self { @@ -242,22 +263,29 @@ impl ProtoArrayForkChoice { Ok(()) } - pub fn process_block(&mut self, block: Block) -> Result<(), String> { + pub fn process_block<E: EthSpec>( + &mut self, + block: Block, + current_slot: Slot, + ) -> Result<(), String> { if block.parent_root.is_none() { return Err("Missing parent root".to_string()); } self.proto_array - .on_block(block) + .on_block::<E>(block, current_slot) .map_err(|e| format!("process_block_error: {:?}", e)) } + #[allow(clippy::too_many_arguments)] pub fn find_head<E: EthSpec>( &mut self, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint, justified_state_balances: &[u64], proposer_boost_root: Hash256, + equivocating_indices: &BTreeSet<u64>, + current_slot: Slot, spec: &ChainSpec, ) -> Result<Hash256, String> { let 
old_balances = &mut self.balances; @@ -269,6 +297,7 @@ impl ProtoArrayForkChoice { &mut self.votes, old_balances, new_balances, + equivocating_indices, ) .map_err(|e| format!("find_head compute_deltas failed: {:?}", e))?; @@ -279,6 +308,7 @@ impl ProtoArrayForkChoice { finalized_checkpoint, new_balances, proposer_boost_root, + current_slot, spec, ) .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?; @@ -286,10 +316,121 @@ impl ProtoArrayForkChoice { *old_balances = new_balances.to_vec(); self.proto_array - .find_head(&justified_checkpoint.root) + .find_head::<E>(&justified_checkpoint.root, current_slot) .map_err(|e| format!("find_head failed: {:?}", e)) } + /// Returns `true` if there are any blocks in `self` with an `INVALID` execution payload status. + /// + /// This will operate on *all* blocks, even those that do not descend from the finalized + /// ancestor. + pub fn contains_invalid_payloads(&mut self) -> bool { + self.proto_array + .nodes + .iter() + .any(|node| node.execution_status.is_invalid()) + } + + /// For all nodes, regardless of their relationship to the finalized block, set their execution + /// status to be optimistic. + /// + /// In practice this means forgetting any `VALID` or `INVALID` statuses. + pub fn set_all_blocks_to_optimistic<E: EthSpec>( + &mut self, + spec: &ChainSpec, + ) -> Result<(), String> { + // Iterate backwards through all nodes in the `proto_array`. Whilst it's not strictly + // required to do this process in reverse, it seems natural when we consider how LMD votes + // are counted. + // + // This function will touch all blocks, even those that do not descend from the finalized + // block. Since this function is expected to run at start-up during very rare + // circumstances we prefer simplicity over efficiency. 
+ for node_index in (0..self.proto_array.nodes.len()).rev() { + let node = self + .proto_array + .nodes + .get_mut(node_index) + .ok_or("unreachable index out of bounds in proto_array nodes")?; + + match node.execution_status { + ExecutionStatus::Invalid(block_hash) => { + node.execution_status = ExecutionStatus::Optimistic(block_hash); + + // Restore the weight of the node, it would have been set to `0` in + // `apply_score_changes` when it was invalidated. + let mut restored_weight: u64 = self + .votes + .0 + .iter() + .enumerate() + .filter_map(|(validator_index, vote)| { + if vote.current_root == node.root { + // Any voting validator that does not have a balance should be + // ignored. This is consistent with `compute_deltas`. + self.balances.get(validator_index) + } else { + None + } + }) + .sum(); + + // If the invalid root was boosted, apply the weight to it and + // ancestors. + if let Some(proposer_score_boost) = spec.proposer_score_boost { + if self.proto_array.previous_proposer_boost.root == node.root { + // Compute the score based upon the current balances. We can't rely on + // the `previous_proposr_boost.score` since it is set to zero with an + // invalid node. + let proposer_score = + calculate_proposer_boost::<E>(&self.balances, proposer_score_boost) + .ok_or("Failed to compute proposer boost")?; + // Store the score we've applied here so it can be removed in + // a later call to `apply_score_changes`. + self.proto_array.previous_proposer_boost.score = proposer_score; + // Apply this boost to this node. + restored_weight = restored_weight + .checked_add(proposer_score) + .ok_or("Overflow when adding boost to weight")?; + } + } + + // Add the restored weight to the node and all ancestors. 
+ if restored_weight > 0 { + let mut node_or_ancestor = node; + loop { + node_or_ancestor.weight = node_or_ancestor + .weight + .checked_add(restored_weight) + .ok_or("Overflow when adding weight to ancestor")?; + + if let Some(parent_index) = node_or_ancestor.parent { + node_or_ancestor = self + .proto_array + .nodes + .get_mut(parent_index) + .ok_or(format!("Missing parent index: {}", parent_index))?; + } else { + // This is either the finalized block or a block that does not + // descend from the finalized block. + break; + } + } + } + } + // There are no balance changes required if the node was either valid or + // optimistic. + ExecutionStatus::Valid(block_hash) | ExecutionStatus::Optimistic(block_hash) => { + node.execution_status = ExecutionStatus::Optimistic(block_hash) + } + // An irrelevant node cannot become optimistic, this is a no-op. + ExecutionStatus::Irrelevant(_) => (), + } + } + + Ok(()) + } + pub fn maybe_prune(&mut self, finalized_root: Hash256) -> Result<(), String> { self.proto_array .maybe_prune(finalized_root) @@ -341,6 +482,8 @@ impl ProtoArrayForkChoice { justified_checkpoint, finalized_checkpoint, execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, }) } else { None @@ -391,8 +534,12 @@ impl ProtoArrayForkChoice { SszContainer::from(self).as_ssz_bytes() } - pub fn from_bytes(bytes: &[u8]) -> Result<Self, String> { + pub fn from_bytes( + bytes: &[u8], + count_unrealized_full: CountUnrealizedFull, + ) -> Result<Self, String> { SszContainer::from_ssz_bytes(bytes) + .map(|container| (container, count_unrealized_full)) .map(Into::into) .map_err(|e| format!("Failed to decode ProtoArrayForkChoice: {:?}", e)) } @@ -427,6 +574,7 @@ fn compute_deltas( votes: &mut ElasticList<VoteTracker>, old_balances: &[u64], new_balances: &[u64], + equivocating_indices: &BTreeSet<u64>, ) -> Result<Vec<i64>, Error> { let mut 
deltas = vec![0_i64; indices.len()]; @@ -437,6 +585,38 @@ fn compute_deltas( continue; } + // Handle newly slashed validators by deducting their weight from their current vote. We + // determine if they are newly slashed by checking whether their `vote.current_root` is + // non-zero. After applying the deduction a single time we set their `current_root` to zero + // and never update it again (thus preventing repeat deductions). + // + // Even if they make new attestations which are processed by `process_attestation` these + // will only update their `vote.next_root`. + if equivocating_indices.contains(&(val_index as u64)) { + // First time we've processed this slashing in fork choice: + // + // 1. Add a negative delta for their `current_root`. + // 2. Set their `current_root` (permanently) to zero. + if !vote.current_root.is_zero() { + let old_balance = old_balances.get(val_index).copied().unwrap_or(0); + + if let Some(current_delta_index) = indices.get(&vote.current_root).copied() { + let delta = deltas + .get(current_delta_index) + .ok_or(Error::InvalidNodeDelta(current_delta_index))? + .checked_sub(old_balance as i64) + .ok_or(Error::DeltaOverflow(current_delta_index))?; + + // Array access safe due to check on previous line. + deltas[current_delta_index] = delta; + } + + vote.current_root = Hash256::zero(); + } + // We've handled this slashed validator, continue without applying an ordinary delta. + continue; + } + // If the validator was not included in the _old_ balances (i.e., it did not exist yet) // then say its balance was zero. let old_balance = old_balances.get(val_index).copied().unwrap_or(0); @@ -485,6 +665,7 @@ fn compute_deltas( #[cfg(test)] mod test_compute_deltas { use super::*; + use types::MainnetEthSpec; /// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. 
fn hash_from_index(i: usize) -> Hash256 { @@ -510,7 +691,7 @@ mod test_compute_deltas { root: finalized_root, }; - let mut fc = ProtoArrayForkChoice::new( + let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>( genesis_slot, state_root, genesis_checkpoint, @@ -518,39 +699,50 @@ mod test_compute_deltas { junk_shuffling_id.clone(), junk_shuffling_id.clone(), execution_status, + CountUnrealizedFull::default(), ) .unwrap(); // Add block that is a finalized descendant. fc.proto_array - .on_block(Block { - slot: genesis_slot + 1, - root: finalized_desc, - parent_root: Some(finalized_root), - state_root, - target_root: finalized_root, - current_epoch_shuffling_id: junk_shuffling_id.clone(), - next_epoch_shuffling_id: junk_shuffling_id.clone(), - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, - execution_status, - }) + .on_block::<MainnetEthSpec>( + Block { + slot: genesis_slot + 1, + root: finalized_desc, + parent_root: Some(finalized_root), + state_root, + target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: Some(genesis_checkpoint), + unrealized_finalized_checkpoint: Some(genesis_checkpoint), + }, + genesis_slot + 1, + ) .unwrap(); // Add block that is *not* a finalized descendant. 
fc.proto_array - .on_block(Block { - slot: genesis_slot + 1, - root: not_finalized_desc, - parent_root: None, - state_root, - target_root: finalized_root, - current_epoch_shuffling_id: junk_shuffling_id.clone(), - next_epoch_shuffling_id: junk_shuffling_id, - justified_checkpoint: genesis_checkpoint, - finalized_checkpoint: genesis_checkpoint, - execution_status, - }) + .on_block::<MainnetEthSpec>( + Block { + slot: genesis_slot + 1, + root: not_finalized_desc, + parent_root: None, + state_root, + target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id, + justified_checkpoint: genesis_checkpoint, + finalized_checkpoint: genesis_checkpoint, + execution_status, + unrealized_justified_checkpoint: None, + unrealized_finalized_checkpoint: None, + }, + genesis_slot + 1, + ) .unwrap(); assert!(!fc.is_descendant(unknown, unknown)); @@ -582,6 +774,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -594,8 +787,14 @@ mod test_compute_deltas { new_balances.push(0); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -626,6 +825,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -638,8 +838,14 @@ mod test_compute_deltas { new_balances.push(BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute 
deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -677,6 +883,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -689,8 +896,14 @@ mod test_compute_deltas { new_balances.push(BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -723,6 +936,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -735,8 +949,14 @@ mod test_compute_deltas { new_balances.push(BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -774,6 +994,7 @@ mod test_compute_deltas { let mut indices = HashMap::new(); let mut votes = ElasticList::default(); + let equivocating_indices = BTreeSet::new(); // There is only one block. 
indices.insert(hash_from_index(1), 0); @@ -796,8 +1017,14 @@ mod test_compute_deltas { next_epoch: Epoch::new(0), }); - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!(deltas.len(), 1, "deltas should have expected length"); @@ -826,6 +1053,7 @@ mod test_compute_deltas { let mut votes = ElasticList::default(); let mut old_balances = vec![]; let mut new_balances = vec![]; + let equivocating_indices = BTreeSet::new(); for i in 0..validator_count { indices.insert(hash_from_index(i), i); @@ -838,8 +1066,14 @@ mod test_compute_deltas { new_balances.push(NEW_BALANCE); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!( deltas.len(), @@ -879,6 +1113,7 @@ mod test_compute_deltas { let mut indices = HashMap::new(); let mut votes = ElasticList::default(); + let equivocating_indices = BTreeSet::new(); // There are two blocks. indices.insert(hash_from_index(1), 0); @@ -898,8 +1133,14 @@ mod test_compute_deltas { }); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!(deltas.len(), 2, "deltas should have expected length"); @@ -928,6 +1169,7 @@ mod test_compute_deltas { let mut indices = HashMap::new(); let mut votes = ElasticList::default(); + let equivocating_indices = BTreeSet::new(); // There are two blocks. 
indices.insert(hash_from_index(1), 0); @@ -947,8 +1189,14 @@ mod test_compute_deltas { }); } - let deltas = compute_deltas(&indices, &mut votes, &old_balances, &new_balances) - .expect("should compute deltas"); + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); assert_eq!(deltas.len(), 2, "deltas should have expected length"); @@ -969,4 +1217,72 @@ mod test_compute_deltas { ); } } + + #[test] + fn validator_equivocates() { + const OLD_BALANCE: u64 = 42; + const NEW_BALANCE: u64 = 43; + + let mut indices = HashMap::new(); + let mut votes = ElasticList::default(); + + // There are two blocks. + indices.insert(hash_from_index(1), 0); + indices.insert(hash_from_index(2), 1); + + // There are two validators. + let old_balances = vec![OLD_BALANCE; 2]; + let new_balances = vec![NEW_BALANCE; 2]; + + // Both validator move votes from block 1 to block 2. + for _ in 0..2 { + votes.0.push(VoteTracker { + current_root: hash_from_index(1), + next_root: hash_from_index(2), + next_epoch: Epoch::new(0), + }); + } + + // Validator 0 is slashed. + let equivocating_indices = BTreeSet::from_iter([0]); + + let deltas = compute_deltas( + &indices, + &mut votes, + &old_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); + + assert_eq!(deltas.len(), 2, "deltas should have expected length"); + + assert_eq!( + deltas[0], + -2 * OLD_BALANCE as i64, + "block 1 should have lost two old balances" + ); + assert_eq!( + deltas[1], NEW_BALANCE as i64, + "block 2 should have gained one balance" + ); + + // Validator 0's current root should have been reset. + assert_eq!(votes.0[0].current_root, Hash256::zero()); + assert_eq!(votes.0[0].next_root, hash_from_index(2)); + + // Validator 1's current root should have been updated. 
+ assert_eq!(votes.0[1].current_root, hash_from_index(2)); + + // Re-computing the deltas should be a no-op (no repeat deduction for the slashed validator). + let deltas = compute_deltas( + &indices, + &mut votes, + &new_balances, + &new_balances, + &equivocating_indices, + ) + .expect("should compute deltas"); + assert_eq!(deltas, vec![0, 0]); + } } diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 7f7ef79fe8..63f75ed0a2 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,6 +1,6 @@ use crate::proto_array::ProposerBoost; use crate::{ - proto_array::{ProtoArray, ProtoNode}, + proto_array::{CountUnrealizedFull, ProtoArray, ProtoNode}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, }; use ssz::{four_byte_option_impl, Encode}; @@ -41,8 +41,8 @@ impl From<&ProtoArrayForkChoice> for SszContainer { } } -impl From<SszContainer> for ProtoArrayForkChoice { - fn from(from: SszContainer) -> Self { +impl From<(SszContainer, CountUnrealizedFull)> for ProtoArrayForkChoice { + fn from((from, count_unrealized_full): (SszContainer, CountUnrealizedFull)) -> Self { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, justified_checkpoint: from.justified_checkpoint, @@ -50,6 +50,7 @@ impl From<SszContainer> for ProtoArrayForkChoice { nodes: from.nodes, indices: from.indices.into_iter().collect::<HashMap<_, _>>(), previous_proposer_boost: from.previous_proposer_boost, + count_unrealized_full, }; Self { diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index 81e2bbe963..92b5966c9a 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -6,6 +6,7 @@ pub mod hex_vec; pub mod json_str; pub mod list_of_bytes_lists; pub mod quoted_u64_vec; +pub mod u256_hex_be; pub mod u32_hex; pub mod u64_hex_be; pub mod u8_hex; diff --git a/consensus/serde_utils/src/u256_hex_be.rs 
b/consensus/serde_utils/src/u256_hex_be.rs new file mode 100644 index 0000000000..8007e5792c --- /dev/null +++ b/consensus/serde_utils/src/u256_hex_be.rs @@ -0,0 +1,144 @@ +use ethereum_types::U256; + +use serde::de::Visitor; +use serde::{de, Deserializer, Serialize, Serializer}; +use std::fmt; +use std::str::FromStr; + +pub fn serialize<S>(num: &U256, serializer: S) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + num.serialize(serializer) +} + +pub struct U256Visitor; + +impl<'de> Visitor<'de> for U256Visitor { + type Value = String; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a well formatted hex string") + } + + fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> + where + E: de::Error, + { + if !value.starts_with("0x") { + return Err(de::Error::custom("must start with 0x")); + } + let stripped = &value[2..]; + if stripped.is_empty() { + Err(de::Error::custom(format!( + "quantity cannot be {:?}", + stripped + ))) + } else if stripped == "0" { + Ok(value.to_string()) + } else if stripped.starts_with('0') { + Err(de::Error::custom("cannot have leading zero")) + } else { + Ok(value.to_string()) + } + } +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result<U256, D::Error> +where + D: Deserializer<'de>, +{ + let decoded = deserializer.deserialize_string(U256Visitor)?; + + U256::from_str(&decoded).map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e))) +} + +#[cfg(test)] +mod test { + use ethereum_types::U256; + use serde::{Deserialize, Serialize}; + use serde_json; + + #[derive(Debug, PartialEq, Serialize, Deserialize)] + #[serde(transparent)] + struct Wrapper { + #[serde(with = "super")] + val: U256, + } + + #[test] + fn encoding() { + assert_eq!( + &serde_json::to_string(&Wrapper { val: 0.into() }).unwrap(), + "\"0x0\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 1.into() }).unwrap(), + "\"0x1\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 
256.into() }).unwrap(), + "\"0x100\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 65.into() }).unwrap(), + "\"0x41\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { val: 1024.into() }).unwrap(), + "\"0x400\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: U256::max_value() - 1 + }) + .unwrap(), + "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" + ); + assert_eq!( + &serde_json::to_string(&Wrapper { + val: U256::max_value() + }) + .unwrap(), + "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ); + } + + #[test] + fn decoding() { + assert_eq!( + serde_json::from_str::<Wrapper>("\"0x0\"").unwrap(), + Wrapper { val: 0.into() }, + ); + assert_eq!( + serde_json::from_str::<Wrapper>("\"0x41\"").unwrap(), + Wrapper { val: 65.into() }, + ); + assert_eq!( + serde_json::from_str::<Wrapper>("\"0x400\"").unwrap(), + Wrapper { val: 1024.into() }, + ); + assert_eq!( + serde_json::from_str::<Wrapper>( + "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" + ) + .unwrap(), + Wrapper { + val: U256::max_value() - 1 + }, + ); + assert_eq!( + serde_json::from_str::<Wrapper>( + "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ) + .unwrap(), + Wrapper { + val: U256::max_value() + }, + ); + serde_json::from_str::<Wrapper>("\"0x\"").unwrap_err(); + serde_json::from_str::<Wrapper>("\"0x0400\"").unwrap_err(); + serde_json::from_str::<Wrapper>("\"400\"").unwrap_err(); + serde_json::from_str::<Wrapper>("\"ff\"").unwrap_err(); + } +} diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs index 078f0a96dd..d91ddabe02 100644 --- a/consensus/ssz/src/decode/impls.rs +++ b/consensus/ssz/src/decode/impls.rs @@ -4,7 +4,7 @@ use core::num::NonZeroUsize; use ethereum_types::{H160, H256, U128, U256}; use itertools::process_results; use smallvec::SmallVec; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use 
std::iter::{self, FromIterator}; use std::sync::Arc; @@ -431,6 +431,28 @@ where } } +impl<T> Decode for BTreeSet<T> +where + T: Decode + Ord, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { + if bytes.is_empty() { + Ok(Self::from_iter(iter::empty())) + } else if T::is_ssz_fixed_len() { + bytes + .chunks(T::ssz_fixed_len()) + .map(T::from_ssz_bytes) + .collect() + } else { + decode_list_of_variable_length_items(bytes, None) + } + } +} + /// Decodes `bytes` as if it were a list of variable-length items. /// /// The `ssz::SszDecoder` can also perform this functionality, however this function is diff --git a/consensus/ssz/src/decode/try_from_iter.rs b/consensus/ssz/src/decode/try_from_iter.rs index b7afe27a1c..22db02d4fc 100644 --- a/consensus/ssz/src/decode/try_from_iter.rs +++ b/consensus/ssz/src/decode/try_from_iter.rs @@ -1,5 +1,5 @@ use smallvec::SmallVec; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use std::convert::Infallible; use std::fmt::Debug; @@ -62,6 +62,20 @@ where } } +impl<T> TryFromIter<T> for BTreeSet<T> +where + T: Ord, +{ + type Error = Infallible; + + fn try_from_iter<I>(iter: I) -> Result<Self, Self::Error> + where + I: IntoIterator<Item = T>, + { + Ok(Self::from_iter(iter)) + } +} + /// Partial variant of `collect`. pub trait TryCollect: Iterator { fn try_collect<C>(self) -> Result<C, C::Error> diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs index 24f23a5ae8..cfd95ba40d 100644 --- a/consensus/ssz/src/encode/impls.rs +++ b/consensus/ssz/src/encode/impls.rs @@ -2,7 +2,7 @@ use super::*; use core::num::NonZeroUsize; use ethereum_types::{H160, H256, U128, U256}; use smallvec::SmallVec; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; macro_rules! 
impl_encodable_for_uint { @@ -326,6 +326,23 @@ where } } +impl<T> Encode for BTreeSet<T> +where + T: Encode + Ord, +{ + fn is_ssz_fixed_len() -> bool { + false + } + + fn ssz_bytes_len(&self) -> usize { + sequence_ssz_bytes_len(self.iter()) + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + sequence_ssz_append(self.iter(), buf) + } +} + impl Encode for bool { fn is_ssz_fixed_len() -> bool { true diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs index 53aab99391..e64e76ef4d 100644 --- a/consensus/ssz_types/src/fixed_vector.rs +++ b/consensus/ssz_types/src/fixed_vector.rs @@ -353,7 +353,7 @@ mod test { let vec = vec![0, 2, 4, 6]; let fixed: FixedVector<u64, U4> = FixedVector::from(vec); - assert_eq!(fixed.get(0), Some(&0)); + assert_eq!(fixed.first(), Some(&0)); assert_eq!(fixed.get(3), Some(&6)); assert_eq!(fixed.get(4), None); } diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs index 1d062114f2..f23872c87f 100644 --- a/consensus/ssz_types/src/variable_list.rs +++ b/consensus/ssz_types/src/variable_list.rs @@ -335,7 +335,7 @@ mod test { let vec = vec![0, 2, 4, 6]; let fixed: VariableList<u64, U4> = VariableList::from(vec); - assert_eq!(fixed.get(0), Some(&0)); + assert_eq!(fixed.first(), Some(&0)); assert_eq!(fixed.get(3), Some(&6)); assert_eq!(fixed.get(4), None); } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index b265283d2b..c61a299584 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -7,12 +7,14 @@ edition = "2021" [dev-dependencies] env_logger = "0.9.0" beacon_chain = { path = "../../beacon_node/beacon_chain" } +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } [dependencies] bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" itertools = "0.10.0" eth2_ssz = "0.4.1" +eth2_ssz_derive = "0.3.0" eth2_ssz_types = "0.2.2" merkle_proof = { path = 
"../merkle_proof" } safe_arith = { path = "../safe_arith" } @@ -27,6 +29,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics", optional = true lazy_static = { version = "1.4.0", optional = true } rustc-hash = "1.1.0" vec_map = "0.8.2" +derivative = "2.1.1" [features] default = ["legacy-arith", "metrics"] diff --git a/consensus/state_processing/src/common/altair.rs b/consensus/state_processing/src/common/altair.rs index d0f1fca552..4380154133 100644 --- a/consensus/state_processing/src/common/altair.rs +++ b/consensus/state_processing/src/common/altair.rs @@ -2,25 +2,43 @@ use integer_sqrt::IntegerSquareRoot; use safe_arith::{ArithError, SafeArith}; use types::*; +/// This type exists to avoid confusing `total_active_balance` with `base_reward_per_increment`, +/// since they are used in close proximity and the same type (`u64`). +#[derive(Copy, Clone)] +pub struct BaseRewardPerIncrement(u64); + +impl BaseRewardPerIncrement { + pub fn new(total_active_balance: u64, spec: &ChainSpec) -> Result<Self, ArithError> { + get_base_reward_per_increment(total_active_balance, spec).map(Self) + } + + pub fn as_u64(&self) -> u64 { + self.0 + } +} + /// Returns the base reward for some validator. /// +/// The function has a different interface to the spec since it accepts the +/// `base_reward_per_increment` without computing it each time. Avoiding the re computation has +/// shown to be a significant optimisation. +/// /// Spec v1.1.0 pub fn get_base_reward( validator_effective_balance: u64, - // Should be == get_total_active_balance(state, spec) - total_active_balance: u64, + base_reward_per_increment: BaseRewardPerIncrement, spec: &ChainSpec, ) -> Result<u64, Error> { validator_effective_balance .safe_div(spec.effective_balance_increment)? - .safe_mul(get_base_reward_per_increment(total_active_balance, spec)?) + .safe_mul(base_reward_per_increment.as_u64()) .map_err(Into::into) } /// Returns the base reward for some validator. 
/// /// Spec v1.1.0 -pub fn get_base_reward_per_increment( +fn get_base_reward_per_increment( total_active_balance: u64, spec: &ChainSpec, ) -> Result<u64, ArithError> { diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index fb636f861e..d7d02c3601 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -1,12 +1,10 @@ use types::*; /// Returns validator indices which participated in the attestation, sorted by increasing index. -/// -/// Spec v0.12.1 pub fn get_attesting_indices<T: EthSpec>( committee: &[usize], bitlist: &BitList<T::MaxValidatorsPerCommittee>, -) -> Result<Vec<usize>, BeaconStateError> { +) -> Result<Vec<u64>, BeaconStateError> { if bitlist.len() != committee.len() { return Err(BeaconStateError::InvalidBitfield); } @@ -15,7 +13,7 @@ pub fn get_attesting_indices<T: EthSpec>( for (i, validator_index) in committee.iter().enumerate() { if let Ok(true) = bitlist.get(i) { - indices.push(*validator_index) + indices.push(*validator_index as u64) } } @@ -23,3 +21,12 @@ pub fn get_attesting_indices<T: EthSpec>( Ok(indices) } + +/// Shortcut for getting the attesting indices while fetching the committee from the state's cache. 
+pub fn get_attesting_indices_from_state<T: EthSpec>( + state: &BeaconState<T>, + att: &Attestation<T>, +) -> Result<Vec<u64>, BeaconStateError> { + let committee = state.get_beacon_committee(att.data.slot, att.data.index)?; + get_attesting_indices::<T>(committee.committee, &att.aggregation_bits) +} diff --git a/consensus/state_processing/src/common/get_indexed_attestation.rs b/consensus/state_processing/src/common/get_indexed_attestation.rs index daa1c09307..63f63698e4 100644 --- a/consensus/state_processing/src/common/get_indexed_attestation.rs +++ b/consensus/state_processing/src/common/get_indexed_attestation.rs @@ -14,9 +14,7 @@ pub fn get_indexed_attestation<T: EthSpec>( let attesting_indices = get_attesting_indices::<T>(committee, &attestation.aggregation_bits)?; Ok(IndexedAttestation { - attesting_indices: VariableList::new( - attesting_indices.into_iter().map(|x| x as u64).collect(), - )?, + attesting_indices: VariableList::new(attesting_indices)?, data: attestation.data.clone(), signature: attestation.signature.clone(), }) diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 3d459f8e9b..17b193e5f4 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -10,7 +10,7 @@ pub mod base; pub use deposit_data_tree::DepositDataTree; pub use get_attestation_participation::get_attestation_participation_flag_indices; -pub use get_attesting_indices::get_attesting_indices; +pub use get_attesting_indices::{get_attesting_indices, get_attesting_indices_from_state}; pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 07878110b5..397b2ad671 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ 
b/consensus/state_processing/src/per_block_processing.rs @@ -41,7 +41,7 @@ use arbitrary::Arbitrary; /// The strategy to be used when validating the block's signatures. #[cfg_attr(feature = "arbitrary-fuzz", derive(Arbitrary))] -#[derive(PartialEq, Clone, Copy)] +#[derive(PartialEq, Clone, Copy, Debug)] pub enum BlockSignatureStrategy { /// Do not validate any signature. Use with caution. NoVerification, diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 8358003e4b..306e86714c 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -1,4 +1,4 @@ -use crate::common::{altair::get_base_reward_per_increment, decrease_balance, increase_balance}; +use crate::common::{altair::BaseRewardPerIncrement, decrease_balance, increase_balance}; use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid}; use crate::{signature_sets::sync_aggregate_signature_set, VerifySignatures}; use safe_arith::SafeArith; @@ -72,7 +72,8 @@ pub fn compute_sync_aggregate_rewards<T: EthSpec>( let total_active_balance = state.get_total_active_balance()?; let total_active_increments = total_active_balance.safe_div(spec.effective_balance_increment)?; - let total_base_rewards = get_base_reward_per_increment(total_active_balance, spec)? + let total_base_rewards = BaseRewardPerIncrement::new(total_active_balance, spec)? + .as_u64() .safe_mul(total_active_increments)?; let max_participant_rewards = total_base_rewards .safe_mul(SYNC_REWARD_WEIGHT)? 
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9339afa0d4..e71ca5f8e9 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -1,7 +1,8 @@ use super::*; use crate::common::{ - altair::get_base_reward, get_attestation_participation_flag_indices, increase_balance, - initiate_validator_exit, slash_validator, + altair::{get_base_reward, BaseRewardPerIncrement}, + get_attestation_participation_flag_indices, increase_balance, initiate_validator_exit, + slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; use crate::VerifySignatures; @@ -129,6 +130,7 @@ pub mod altair { // Update epoch participation flags. let total_active_balance = state.get_total_active_balance()?; + let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; let mut proposer_reward_numerator = 0; for index in &indexed_attestation.attesting_indices { let index = *index as usize; @@ -146,7 +148,7 @@ pub mod altair { validator_participation.add_flag(flag_index)?; let effective_balance = state.get_validator(index)?.effective_balance; proposer_reward_numerator.safe_add_assign( - get_base_reward(effective_balance, total_active_balance, spec)? + get_base_reward(effective_balance, base_reward_per_increment, spec)? 
.safe_mul(weight)?, )?; } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 36a2bc1026..c1ceecb390 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -7,8 +7,8 @@ use crate::per_block_processing::errors::{ ProposerSlashingInvalid, }; use crate::{ - per_block_processing::process_operations, BlockSignatureStrategy, ConsensusContext, - VerifyBlockRoot, VerifySignatures, + per_block_processing::{process_operations, verify_exit::verify_exit}, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use lazy_static::lazy_static; @@ -27,7 +27,7 @@ lazy_static! { static ref KEYPAIRS: Vec<Keypair> = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); } -fn get_harness<E: EthSpec>( +async fn get_harness<E: EthSpec>( epoch_offset: u64, num_validators: usize, ) -> BeaconChainHarness<EphemeralHarnessType<E>> { @@ -41,27 +41,31 @@ fn get_harness<E: EthSpec>( .build(); let state = harness.get_current_state(); if last_slot_of_epoch > Slot::new(0) { - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (1..last_slot_of_epoch.as_u64()) - .map(Slot::new) - .collect::<Vec<_>>() - .as_slice(), - (0..num_validators).collect::<Vec<_>>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..last_slot_of_epoch.as_u64()) + .map(Slot::new) + .collect::<Vec<_>>() + .as_slice(), + (0..num_validators).collect::<Vec<_>>().as_slice(), + ) + .await; } harness } -#[test] -fn valid_block_ok() { +#[tokio::test] +async fn valid_block_ok() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = 
harness.get_current_state(); let slot = state.slot(); - let (block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let mut ctxt = ConsensusContext::new(block.slot()); let result = per_block_processing( @@ -76,15 +80,15 @@ fn valid_block_ok() { assert!(result.is_ok()); } -#[test] -fn invalid_block_header_state_slot() { +#[tokio::test] +async fn invalid_block_header_state_slot() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot() + Slot::new(1); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot); + let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot).await; let (mut block, signature) = signed_block.deconstruct(); *block.slot_mut() = slot + Slot::new(1); @@ -106,15 +110,17 @@ fn invalid_block_header_state_slot() { ); } -#[test] -fn invalid_parent_block_root() { +#[tokio::test] +async fn invalid_parent_block_root() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (signed_block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let (mut block, signature) = signed_block.deconstruct(); *block.parent_root_mut() = Hash256::from([0xAA; 32]); @@ -139,14 +145,16 @@ fn invalid_parent_block_root() { ); } -#[test] -fn invalid_block_signature() { +#[tokio::test] +async fn invalid_block_signature() { let spec = 
MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_return_pre_state(state, slot + Slot::new(1)); + let (signed_block, mut state) = harness + .make_block_return_pre_state(state, slot + Slot::new(1)) + .await; let (block, _) = signed_block.deconstruct(); let mut ctxt = ConsensusContext::new(block.slot()); @@ -168,17 +176,19 @@ fn invalid_block_signature() { ); } -#[test] -fn invalid_randao_reveal_signature() { +#[tokio::test] +async fn invalid_randao_reveal_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let state = harness.get_current_state(); let slot = state.slot(); - let (signed_block, mut state) = harness.make_block_with_modifier(state, slot + 1, |block| { - *block.body_mut().randao_reveal_mut() = Signature::empty(); - }); + let (signed_block, mut state) = harness + .make_block_with_modifier(state, slot + 1, |block| { + *block.body_mut().randao_reveal_mut() = Signature::empty(); + }) + .await; let mut ctxt = ConsensusContext::new(signed_block.slot()); let result = per_block_processing( @@ -194,16 +204,22 @@ fn invalid_randao_reveal_signature() { assert_eq!(result, Err(BlockProcessingError::RandaoSignatureInvalid)); } -#[test] -fn valid_4_deposits() { +#[tokio::test] +async fn valid_4_deposits() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 4, None, None); let deposits = 
VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -212,16 +228,22 @@ fn valid_4_deposits() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_deposit_deposit_count_too_big() { +#[tokio::test] +async fn invalid_deposit_deposit_count_too_big() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let big_deposit_count = NUM_DEPOSITS + 1; @@ -238,16 +260,22 @@ fn invalid_deposit_deposit_count_too_big() { ); } -#[test] -fn invalid_deposit_count_too_small() { +#[tokio::test] +async fn invalid_deposit_count_too_small() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; 
*head_block.to_mut().body_mut().deposits_mut() = deposits; let small_deposit_count = NUM_DEPOSITS - 1; @@ -264,16 +292,22 @@ fn invalid_deposit_count_too_small() { ); } -#[test] -fn invalid_deposit_bad_merkle_proof() { +#[tokio::test] +async fn invalid_deposit_bad_merkle_proof() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let bad_index = state.eth1_deposit_index() as usize; @@ -292,17 +326,23 @@ fn invalid_deposit_bad_merkle_proof() { ); } -#[test] -fn invalid_deposit_wrong_sig() { +#[tokio::test] +async fn invalid_deposit_wrong_sig() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty())); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -310,17 +350,23 @@ fn invalid_deposit_wrong_sig() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_deposit_invalid_pub_key() { 
+#[tokio::test] +async fn invalid_deposit_invalid_pub_key() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None); let deposits = VariableList::from(deposits); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; *head_block.to_mut().body_mut().deposits_mut() = deposits; let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec); @@ -329,13 +375,19 @@ fn invalid_deposit_invalid_pub_key() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_attestation_no_committee_for_index() { +#[tokio::test] +async fn invalid_attestation_no_committee_for_index() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0] .data .index += 1; @@ -347,7 +399,7 @@ fn invalid_attestation_no_committee_for_index() { &spec, ); - // Expecting NoCommitee because we manually set the attestation's index to be invalid + // Expecting NoCommittee because we manually set the attestation's index to be invalid assert_eq!( result, Err(BlockProcessingError::AttestationInvalid { @@ -357,13 +409,19 @@ fn invalid_attestation_no_committee_for_index() { ); } -#[test] -fn 
invalid_attestation_wrong_justified_checkpoint() { +#[tokio::test] +async fn invalid_attestation_wrong_justified_checkpoint() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let old_justified_checkpoint = head_block.body().attestations()[0].data.source; let mut new_justified_checkpoint = old_justified_checkpoint; new_justified_checkpoint.epoch += Epoch::new(1); @@ -394,13 +452,19 @@ fn invalid_attestation_wrong_justified_checkpoint() { ); } -#[test] -fn invalid_attestation_bad_aggregation_bitfield_len() { +#[tokio::test] +async fn invalid_attestation_bad_aggregation_bitfield_len() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0].aggregation_bits = Bitfield::with_capacity(spec.target_committee_size).unwrap(); @@ -412,7 +476,7 @@ fn invalid_attestation_bad_aggregation_bitfield_len() { &spec, ); - // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the commitee size. + // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the committee size. 
assert_eq!( result, Err(BlockProcessingError::BeaconStateError( @@ -421,13 +485,19 @@ fn invalid_attestation_bad_aggregation_bitfield_len() { ); } -#[test] -fn invalid_attestation_bad_signature() { +#[tokio::test] +async fn invalid_attestation_bad_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, 97); // minimal number of required validators for this test + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, 97).await; // minimal number of required validators for this test let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0].signature = AggregateSignature::empty(); let result = process_operations::process_attestations( @@ -449,13 +519,19 @@ fn invalid_attestation_bad_signature() { ); } -#[test] -fn invalid_attestation_included_too_early() { +#[tokio::test] +async fn invalid_attestation_included_too_early() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let new_attesation_slot = head_block.body().attestations()[0].data.slot + Slot::new(MainnetEthSpec::slots_per_epoch()); head_block.to_mut().body_mut().attestations_mut()[0] @@ -484,14 +560,20 @@ fn invalid_attestation_included_too_early() { ); } -#[test] -fn invalid_attestation_included_too_late() { +#[tokio::test] +async fn invalid_attestation_included_too_late() { let spec = MainnetEthSpec::default_spec(); // note to 
maintainer: might need to increase validator count if we get NoCommittee - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; let new_attesation_slot = head_block.body().attestations()[0].data.slot - Slot::new(MainnetEthSpec::slots_per_epoch()); head_block.to_mut().body_mut().attestations_mut()[0] @@ -517,14 +599,20 @@ fn invalid_attestation_included_too_late() { ); } -#[test] -fn invalid_attestation_target_epoch_slot_mismatch() { +#[tokio::test] +async fn invalid_attestation_target_epoch_slot_mismatch() { let spec = MainnetEthSpec::default_spec(); // note to maintainer: might need to increase validator count if we get NoCommittee - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut state = harness.get_current_state(); - let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0; + let mut head_block = harness + .chain + .head_beacon_block() + .as_ref() + .clone() + .deconstruct() + .0; head_block.to_mut().body_mut().attestations_mut()[0] .data .target @@ -549,10 +637,10 @@ fn invalid_attestation_target_epoch_slot_mismatch() { ); } -#[test] -fn valid_insert_attester_slashing() { +#[tokio::test] +async fn valid_insert_attester_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let attester_slashing = harness.make_attester_slashing(vec![1, 2]); @@ -568,10 +656,10 @@ fn valid_insert_attester_slashing() { assert_eq!(result, 
Ok(())); } -#[test] -fn invalid_attester_slashing_not_slashable() { +#[tokio::test] +async fn invalid_attester_slashing_not_slashable() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_1 = attester_slashing.attestation_2.clone(); @@ -594,10 +682,10 @@ fn invalid_attester_slashing_not_slashable() { ); } -#[test] -fn invalid_attester_slashing_1_invalid() { +#[tokio::test] +async fn invalid_attester_slashing_1_invalid() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); @@ -623,10 +711,10 @@ fn invalid_attester_slashing_1_invalid() { ); } -#[test] -fn invalid_attester_slashing_2_invalid() { +#[tokio::test] +async fn invalid_attester_slashing_2_invalid() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); @@ -652,10 +740,10 @@ fn invalid_attester_slashing_2_invalid() { ); } -#[test] -fn valid_insert_proposer_slashing() { +#[tokio::test] +async fn valid_insert_proposer_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing 
= harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); let result = process_operations::process_proposer_slashings( @@ -668,10 +756,10 @@ fn valid_insert_proposer_slashing() { assert!(result.is_ok()); } -#[test] -fn invalid_proposer_slashing_proposals_identical() { +#[tokio::test] +async fn invalid_proposer_slashing_proposals_identical() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone(); @@ -694,10 +782,10 @@ fn invalid_proposer_slashing_proposals_identical() { ); } -#[test] -fn invalid_proposer_slashing_proposer_unknown() { +#[tokio::test] +async fn invalid_proposer_slashing_proposer_unknown() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.proposer_index = 3_141_592; @@ -721,10 +809,10 @@ fn invalid_proposer_slashing_proposer_unknown() { ); } -#[test] -fn invalid_proposer_slashing_duplicate_slashing() { +#[tokio::test] +async fn invalid_proposer_slashing_duplicate_slashing() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let proposer_slashing = harness.make_proposer_slashing(1); let mut state = harness.get_current_state(); @@ -752,10 +840,10 @@ fn invalid_proposer_slashing_duplicate_slashing() { ); } -#[test] -fn invalid_bad_proposal_1_signature() { +#[tokio::test] +async fn invalid_bad_proposal_1_signature() { 
let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -776,10 +864,10 @@ fn invalid_bad_proposal_1_signature() { ); } -#[test] -fn invalid_bad_proposal_2_signature() { +#[tokio::test] +async fn invalid_bad_proposal_2_signature() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_2.signature = Signature::empty(); let mut state = harness.get_current_state(); @@ -800,10 +888,10 @@ fn invalid_bad_proposal_2_signature() { ); } -#[test] -fn invalid_proposer_slashing_proposal_epoch_mismatch() { +#[tokio::test] +async fn invalid_proposer_slashing_proposal_epoch_mismatch() { let spec = MainnetEthSpec::default_spec(); - let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT); + let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await; let mut proposer_slashing = harness.make_proposer_slashing(1); proposer_slashing.signed_header_1.message.slot = Slot::new(0); proposer_slashing.signed_header_2.message.slot = Slot::new(128); @@ -827,3 +915,70 @@ fn invalid_proposer_slashing_proposal_epoch_mismatch() { }) ); } + +#[tokio::test] +async fn fork_spanning_exit() { + let mut spec = MainnetEthSpec::default_spec(); + let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); + + spec.altair_fork_epoch = Some(Epoch::new(2)); + spec.bellatrix_fork_epoch = Some(Epoch::new(4)); + spec.shard_committee_period = 0; + + let harness = 
BeaconChainHarness::builder(MainnetEthSpec::default()) + .spec(spec.clone()) + .deterministic_keypairs(VALIDATOR_COUNT) + .mock_execution_layer() + .fresh_ephemeral_store() + .build(); + + harness.extend_to_slot(slots_per_epoch.into()).await; + + /* + * Produce an exit *before* Altair. + */ + + let signed_exit = harness.make_voluntary_exit(0, Epoch::new(1)); + assert!(signed_exit.message.epoch < spec.altair_fork_epoch.unwrap()); + + /* + * Ensure the exit verifies before Altair. + */ + + let head = harness.chain.canonical_head.cached_head(); + let head_state = &head.snapshot.beacon_state; + assert!(head_state.current_epoch() < spec.altair_fork_epoch.unwrap()); + verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) + .expect("phase0 exit verifies against phase0 state"); + + /* + * Ensure the exit verifies after Altair. + */ + + harness + .extend_to_slot(spec.altair_fork_epoch.unwrap().start_slot(slots_per_epoch)) + .await; + let head = harness.chain.canonical_head.cached_head(); + let head_state = &head.snapshot.beacon_state; + assert!(head_state.current_epoch() >= spec.altair_fork_epoch.unwrap()); + assert!(head_state.current_epoch() < spec.bellatrix_fork_epoch.unwrap()); + verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) + .expect("phase0 exit verifies against altair state"); + + /* + * Ensure the exit no longer verifies after Bellatrix. 
+ */ + + harness + .extend_to_slot( + spec.bellatrix_fork_epoch + .unwrap() + .start_slot(slots_per_epoch), + ) + .await; + let head = harness.chain.canonical_head.cached_head(); + let head_state = &head.snapshot.beacon_state; + assert!(head_state.current_epoch() >= spec.bellatrix_fork_epoch.unwrap()); + verify_exit(head_state, &signed_exit, VerifySignatures::True, &spec) + .expect_err("phase0 exit does not verify against bellatrix state"); +} diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index b263a52173..1d0d42106b 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -3,6 +3,7 @@ use crate::metrics; pub use epoch_processing_summary::EpochProcessingSummary; use errors::EpochProcessingError as Error; +pub use justification_and_finalization_state::JustificationAndFinalizationState; pub use registry_updates::process_registry_updates; use safe_arith::SafeArith; pub use slashings::process_slashings; @@ -15,6 +16,7 @@ pub mod effective_balance_updates; pub mod epoch_processing_summary; pub mod errors; pub mod historical_roots_update; +pub mod justification_and_finalization_state; pub mod registry_updates; pub mod resets; pub mod slashings; diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index 89a2b81bf1..5e8bdef3e4 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -33,7 +33,9 @@ pub fn process_epoch<T: EthSpec>( let sync_committee = state.current_sync_committee()?.clone(); // Justification and finalization. 
- process_justification_and_finalization(state, &participation_cache)?; + let justification_and_finalization_state = + process_justification_and_finalization(state, &participation_cache)?; + justification_and_finalization_state.apply_changes_to_state(state); process_inactivity_updates(state, &mut participation_cache, spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs index dbd9126f13..8d441be70d 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs @@ -12,6 +12,7 @@ pub fn process_inactivity_updates<T: EthSpec>( participation_cache: &mut ParticipationCache, spec: &ChainSpec, ) -> Result<(), EpochProcessingError> { + let previous_epoch = state.previous_epoch(); // Score updates based on previous epoch participation, skip genesis epoch if state.current_epoch() == T::genesis_epoch() { return Ok(()); @@ -25,7 +26,7 @@ pub fn process_inactivity_updates<T: EthSpec>( return Ok(()); } - let is_in_inactivity_leak = state.is_in_inactivity_leak(spec); + let is_in_inactivity_leak = state.is_in_inactivity_leak(previous_epoch, spec); let mut inactivity_scores = state.inactivity_scores_mut()?.iter_cow(); diff --git a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs index 88f524ec67..9c619e5770 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs @@ -1,23 +1,27 @@ use super::ParticipationCache; -use crate::per_epoch_processing::weigh_justification_and_finalization; use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::{ 
+ weigh_justification_and_finalization, JustificationAndFinalizationState, +}; use safe_arith::SafeArith; use types::{BeaconState, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. pub fn process_justification_and_finalization<T: EthSpec>( - state: &mut BeaconState<T>, + state: &BeaconState<T>, participation_cache: &ParticipationCache, -) -> Result<(), Error> { +) -> Result<JustificationAndFinalizationState<T>, Error> { + let justification_and_finalization_state = JustificationAndFinalizationState::new(state); + if state.current_epoch() <= T::genesis_epoch().safe_add(1)? { - return Ok(()); + return Ok(justification_and_finalization_state); } let total_active_balance = participation_cache.current_epoch_total_active_balance(); let previous_target_balance = participation_cache.previous_epoch_target_attesting_balance()?; let current_target_balance = participation_cache.current_epoch_target_attesting_balance()?; weigh_justification_and_finalization( - state, + justification_and_finalization_state, total_active_balance, previous_target_balance, current_target_balance, diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs index 0fa944242d..8eda0ecc04 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs @@ -11,7 +11,7 @@ //! Additionally, this cache is returned from the `altair::process_epoch` function and can be used //! to get useful summaries about the validator participation in an epoch. 
-use crate::common::altair::get_base_reward; +use crate::common::altair::{get_base_reward, BaseRewardPerIncrement}; use safe_arith::{ArithError, SafeArith}; use types::milhouse::update_map::{MaxMap, UpdateMap}; use types::{ @@ -31,6 +31,7 @@ pub enum Error { MissingValidator(usize), BeaconState(BeaconStateError), Arith(ArithError), + InvalidValidatorIndex(usize), } impl From<BeaconStateError> for Error { @@ -115,11 +116,13 @@ impl SingleEpochParticipationCache { val_index: usize, validator: &Validator, epoch_participation: &ParticipationFlags, - state: &BeaconState<T>, + // FIXME(sproul): remove state argument + _state: &BeaconState<T>, + current_epoch: Epoch, relative_epoch: RelativeEpoch, ) -> Result<(), BeaconStateError> { // Sanity check to ensure the validator is active. - let epoch = relative_epoch.into_epoch(state.current_epoch()); + let epoch = relative_epoch.into_epoch(current_epoch); if !validator.is_active_at(epoch) { return Err(BeaconStateError::ValidatorIsInactive { val_index }); } @@ -220,6 +223,8 @@ impl ParticipationCache { let mut validators = ValidatorInfoCache::new(state.validators().len()); let current_epoch_total_active_balance = state.get_total_active_balance()?; + let base_reward_per_increment = + BaseRewardPerIncrement::new(current_epoch_total_active_balance, spec)?; // Contains the set of validators which are either: // @@ -257,7 +262,7 @@ impl ParticipationCache { for (val_index, (((val, curr_epoch_flags), prev_epoch_flags), inactivity_score)) in iter { let is_active_current_epoch = val.is_active_at(current_epoch); let is_active_previous_epoch = val.is_active_at(previous_epoch); - let is_eligible = state.is_eligible_validator(val); + let is_eligible = state.is_eligible_validator(previous_epoch, val); if is_active_current_epoch { current_epoch_participation.process_active_validator( @@ -265,6 +270,7 @@ impl ParticipationCache { val, curr_epoch_flags, state, + current_epoch, RelativeEpoch::Current, )?; } @@ -277,6 +283,7 @@ impl 
ParticipationCache { val, prev_epoch_flags, state, + current_epoch, RelativeEpoch::Previous, )?; } @@ -326,7 +333,7 @@ impl ParticipationCache { if is_eligible || is_active_current_epoch { let effective_balance = val.effective_balance; let base_reward = - get_base_reward(effective_balance, current_epoch_total_active_balance, spec)?; + get_base_reward(effective_balance, base_reward_per_increment, spec)?; validator_info.base_reward = base_reward; validators.info[val_index] = Some(validator_info); } diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index 47e34fd2b6..19987f0153 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -76,6 +76,7 @@ pub fn get_flag_index_deltas<T: EthSpec>( let unslashed_participating_increments = unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; let active_increments = total_active_balance.safe_div(spec.effective_balance_increment)?; + let previous_epoch = state.previous_epoch(); for &index in participation_cache.eligible_validator_indices() { let validator = participation_cache.get_validator(index)?; @@ -84,7 +85,7 @@ pub fn get_flag_index_deltas<T: EthSpec>( let mut delta = Delta::default(); if validator.is_unslashed_participating_index(flag_index)? { - if !state.is_in_inactivity_leak(spec) { + if !state.is_in_inactivity_leak(previous_epoch, spec) { let reward_numerator = base_reward .safe_mul(weight)? 
.safe_mul(unslashed_participating_increments)?; diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index 24e60abe76..5e5188dd25 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -31,7 +31,9 @@ pub fn process_epoch<T: EthSpec>( validator_statuses.process_attestations(state)?; // Justification and finalization. - process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; + let justification_and_finalization_state = + process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; + justification_and_finalization_state.apply_changes_to_state(state); // Rewards and Penalties. process_rewards_and_penalties(state, &mut validator_statuses, spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs index 89fb506eec..9792b54507 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/justification_and_finalization.rs @@ -1,21 +1,25 @@ use crate::per_epoch_processing::base::TotalBalances; -use crate::per_epoch_processing::weigh_justification_and_finalization; use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::{ + weigh_justification_and_finalization, JustificationAndFinalizationState, +}; use safe_arith::SafeArith; use types::{BeaconState, ChainSpec, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. 
pub fn process_justification_and_finalization<T: EthSpec>( - state: &mut BeaconState<T>, + state: &BeaconState<T>, total_balances: &TotalBalances, _spec: &ChainSpec, -) -> Result<(), Error> { +) -> Result<JustificationAndFinalizationState<T>, Error> { + let justification_and_finalization_state = JustificationAndFinalizationState::new(state); + if state.current_epoch() <= T::genesis_epoch().safe_add(1)? { - return Ok(()); + return Ok(justification_and_finalization_state); } weigh_justification_and_finalization( - state, + justification_and_finalization_state, total_balances.current_epoch(), total_balances.previous_epoch_target_attesters(), total_balances.current_epoch_target_attesters(), diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index 7ab35d74ab..3825567125 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -78,6 +78,7 @@ pub fn get_attestation_deltas<T: EthSpec>( validator_statuses: &ValidatorStatuses, spec: &ChainSpec, ) -> Result<Vec<AttestationDelta>, Error> { + let previous_epoch = state.previous_epoch(); let finality_delay = state .previous_epoch() .safe_sub(state.finalized_checkpoint().epoch)? @@ -94,7 +95,7 @@ pub fn get_attestation_deltas<T: EthSpec>( // eligible. 
// FIXME(sproul): this is inefficient let full_validator = state.get_validator(index)?; - if !state.is_eligible_validator(full_validator) { + if !state.is_eligible_validator(previous_epoch, full_validator) { continue; } diff --git a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs index b40f91ce5a..26d2536e5f 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs @@ -278,8 +278,8 @@ impl ValidatorStatuses { // Loop through the participating validator indices and update the status vec. for validator_index in attesting_indices { self.statuses - .get_mut(validator_index) - .ok_or(BeaconStateError::UnknownValidator(validator_index))? + .get_mut(validator_index as usize) + .ok_or(BeaconStateError::UnknownValidator(validator_index as usize))? .update(&status); } } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 8148747423..984f93d550 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -127,7 +127,12 @@ impl<T: EthSpec> EpochProcessingSummary<T> { EpochProcessingSummary::Altair { participation_cache, .. - } => participation_cache.is_current_epoch_timely_target_attester(val_index), + } => participation_cache + .is_current_epoch_timely_target_attester(val_index) + .or_else(|e| match e { + ParticipationCacheError::InvalidValidatorIndex(_) => Ok(false), + e => Err(e), + }), } } @@ -218,7 +223,12 @@ impl<T: EthSpec> EpochProcessingSummary<T> { EpochProcessingSummary::Altair { participation_cache, .. 
- } => participation_cache.is_previous_epoch_timely_target_attester(val_index), + } => participation_cache + .is_previous_epoch_timely_target_attester(val_index) + .or_else(|e| match e { + ParticipationCacheError::InvalidValidatorIndex(_) => Ok(false), + e => Err(e), + }), } } @@ -244,7 +254,12 @@ impl<T: EthSpec> EpochProcessingSummary<T> { EpochProcessingSummary::Altair { participation_cache, .. - } => participation_cache.is_previous_epoch_timely_head_attester(val_index), + } => participation_cache + .is_previous_epoch_timely_head_attester(val_index) + .or_else(|e| match e { + ParticipationCacheError::InvalidValidatorIndex(_) => Ok(false), + e => Err(e), + }), } } @@ -270,7 +285,12 @@ impl<T: EthSpec> EpochProcessingSummary<T> { EpochProcessingSummary::Altair { participation_cache, .. - } => participation_cache.is_previous_epoch_timely_source_attester(val_index), + } => participation_cache + .is_previous_epoch_timely_source_attester(val_index) + .or_else(|e| match e { + ParticipationCacheError::InvalidValidatorIndex(_) => Ok(false), + e => Err(e), + }), } } diff --git a/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs new file mode 100644 index 0000000000..d8a641f464 --- /dev/null +++ b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs @@ -0,0 +1,115 @@ +use types::{BeaconState, BeaconStateError, BitVector, Checkpoint, Epoch, EthSpec, Hash256}; + +/// This is a subset of the `BeaconState` which is used to compute justification and finality +/// without modifying the `BeaconState`. +/// +/// A `JustificationAndFinalizationState` can be created from a `BeaconState` to compute +/// justification/finality changes and then applied to a `BeaconState` to enshrine those changes. 
+#[must_use = "this value must be applied to a state or explicitly dropped"] +pub struct JustificationAndFinalizationState<T: EthSpec> { + /* + * Immutable fields. + */ + previous_epoch: Epoch, + previous_epoch_target_root: Result<Hash256, BeaconStateError>, + current_epoch: Epoch, + current_epoch_target_root: Result<Hash256, BeaconStateError>, + /* + * Mutable fields. + */ + previous_justified_checkpoint: Checkpoint, + current_justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + justification_bits: BitVector<T::JustificationBitsLength>, +} + +impl<T: EthSpec> JustificationAndFinalizationState<T> { + pub fn new(state: &BeaconState<T>) -> Self { + let previous_epoch = state.previous_epoch(); + let current_epoch = state.current_epoch(); + Self { + previous_epoch, + previous_epoch_target_root: state.get_block_root_at_epoch(previous_epoch).copied(), + current_epoch, + current_epoch_target_root: state.get_block_root_at_epoch(current_epoch).copied(), + previous_justified_checkpoint: state.previous_justified_checkpoint(), + current_justified_checkpoint: state.current_justified_checkpoint(), + finalized_checkpoint: state.finalized_checkpoint(), + justification_bits: state.justification_bits().clone(), + } + } + + pub fn apply_changes_to_state(self, state: &mut BeaconState<T>) { + let Self { + /* + * Immutable fields do not need to be used. + */ + previous_epoch: _, + previous_epoch_target_root: _, + current_epoch: _, + current_epoch_target_root: _, + /* + * Mutable fields *must* be used. 
+ */ + previous_justified_checkpoint, + current_justified_checkpoint, + finalized_checkpoint, + justification_bits, + } = self; + + *state.previous_justified_checkpoint_mut() = previous_justified_checkpoint; + *state.current_justified_checkpoint_mut() = current_justified_checkpoint; + *state.finalized_checkpoint_mut() = finalized_checkpoint; + *state.justification_bits_mut() = justification_bits; + } + + pub fn previous_epoch(&self) -> Epoch { + self.previous_epoch + } + + pub fn current_epoch(&self) -> Epoch { + self.current_epoch + } + + pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<Hash256, BeaconStateError> { + if epoch == self.previous_epoch { + self.previous_epoch_target_root.clone() + } else if epoch == self.current_epoch { + self.current_epoch_target_root.clone() + } else { + Err(BeaconStateError::SlotOutOfBounds) + } + } + + pub fn previous_justified_checkpoint(&self) -> Checkpoint { + self.previous_justified_checkpoint + } + + pub fn previous_justified_checkpoint_mut(&mut self) -> &mut Checkpoint { + &mut self.previous_justified_checkpoint + } + + pub fn current_justified_checkpoint_mut(&mut self) -> &mut Checkpoint { + &mut self.current_justified_checkpoint + } + + pub fn current_justified_checkpoint(&self) -> Checkpoint { + self.current_justified_checkpoint + } + + pub fn finalized_checkpoint(&self) -> Checkpoint { + self.finalized_checkpoint + } + + pub fn finalized_checkpoint_mut(&mut self) -> &mut Checkpoint { + &mut self.finalized_checkpoint + } + + pub fn justification_bits(&self) -> &BitVector<T::JustificationBitsLength> { + &self.justification_bits + } + + pub fn justification_bits_mut(&mut self) -> &mut BitVector<T::JustificationBitsLength> { + &mut self.justification_bits + } +} diff --git a/consensus/state_processing/src/per_epoch_processing/tests.rs b/consensus/state_processing/src/per_epoch_processing/tests.rs index 4379547bfe..14bbfbc071 100644 --- a/consensus/state_processing/src/per_epoch_processing/tests.rs +++ 
b/consensus/state_processing/src/per_epoch_processing/tests.rs @@ -6,8 +6,8 @@ use bls::Hash256; use env_logger::{Builder, Env}; use types::Slot; -#[test] -fn runs_without_error() { +#[tokio::test] +async fn runs_without_error() { Builder::from_env(Env::default().default_filter_or("error")).init(); let harness = BeaconChainHarness::builder(MinimalEthSpec) @@ -22,15 +22,17 @@ fn runs_without_error() { (MinimalEthSpec::genesis_epoch() + 4).end_slot(MinimalEthSpec::slots_per_epoch()); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (1..target_slot.as_u64()) - .map(Slot::new) - .collect::<Vec<_>>() - .as_slice(), - (0..8).collect::<Vec<_>>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (1..target_slot.as_u64()) + .map(Slot::new) + .collect::<Vec<_>>() + .as_slice(), + (0..8).collect::<Vec<_>>().as_slice(), + ) + .await; let mut new_head_state = harness.get_current_state(); process_epoch(&mut new_head_state, &spec).unwrap(); @@ -45,8 +47,8 @@ mod release_tests { use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use types::{Epoch, ForkName, InconsistentFork, MainnetEthSpec}; - #[test] - fn altair_state_on_base_fork() { + #[tokio::test] + async fn altair_state_on_base_fork() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); // The Altair fork happens at epoch 1. @@ -61,12 +63,14 @@ mod release_tests { harness.advance_slot(); - harness.extend_chain( - // Build out enough blocks so we get an Altair block at the very end of an epoch. - (slots_per_epoch * 2 - 1) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + // Build out enough blocks so we get an Altair block at the very end of an epoch. 
+ (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.get_current_state() }; @@ -103,8 +107,8 @@ mod release_tests { ); } - #[test] - fn base_state_on_altair_fork() { + #[tokio::test] + async fn base_state_on_altair_fork() { let mut spec = MainnetEthSpec::default_spec(); let slots_per_epoch = MainnetEthSpec::slots_per_epoch(); // The Altair fork never happens. @@ -119,12 +123,14 @@ mod release_tests { harness.advance_slot(); - harness.extend_chain( - // Build out enough blocks so we get a block at the very end of an epoch. - (slots_per_epoch * 2 - 1) as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ); + harness + .extend_chain( + // Build out enough blocks so we get a block at the very end of an epoch. + (slots_per_epoch * 2 - 1) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; harness.get_current_state() }; diff --git a/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs index 6e90ee8f37..96f6a8ef14 100644 --- a/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/weigh_justification_and_finalization.rs @@ -1,16 +1,16 @@ -use crate::per_epoch_processing::Error; +use crate::per_epoch_processing::{Error, JustificationAndFinalizationState}; use safe_arith::SafeArith; use std::ops::Range; -use types::{BeaconState, Checkpoint, EthSpec}; +use types::{Checkpoint, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. #[allow(clippy::if_same_then_else)] // For readability and consistency with spec. 
pub fn weigh_justification_and_finalization<T: EthSpec>( - state: &mut BeaconState<T>, + mut state: JustificationAndFinalizationState<T>, total_active_balance: u64, previous_target_balance: u64, current_target_balance: u64, -) -> Result<(), Error> { +) -> Result<JustificationAndFinalizationState<T>, Error> { let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); @@ -24,7 +24,7 @@ pub fn weigh_justification_and_finalization<T: EthSpec>( if previous_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? { *state.current_justified_checkpoint_mut() = Checkpoint { epoch: previous_epoch, - root: *state.get_block_root_at_epoch(previous_epoch)?, + root: state.get_block_root_at_epoch(previous_epoch)?, }; state.justification_bits_mut().set(1, true)?; } @@ -32,7 +32,7 @@ pub fn weigh_justification_and_finalization<T: EthSpec>( if current_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? { *state.current_justified_checkpoint_mut() = Checkpoint { epoch: current_epoch, - root: *state.get_block_root_at_epoch(current_epoch)?, + root: state.get_block_root_at_epoch(current_epoch)?, }; state.justification_bits_mut().set(0, true)?; } @@ -66,5 +66,5 @@ pub fn weigh_justification_and_finalization<T: EthSpec>( *state.finalized_checkpoint_mut() = old_current_justified_checkpoint; } - Ok(()) + Ok(state) } diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 56b37e645c..d5b28330e3 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -32,8 +32,8 @@ pub fn translate_participation<E: EthSpec>( for index in attesting_indices { for flag_index in &participation_flag_indices { epoch_participation - .get_mut(index) - .ok_or(Error::UnknownValidator(index))? + .get_mut(index as usize) + .ok_or(Error::UnknownValidator(index as usize))? 
.add_flag(*flag_index)?; } } diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index 25c2839edd..80dee28f62 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -5,36 +5,120 @@ use crate::per_block_processing::{ verify_attester_slashing, verify_exit, verify_proposer_slashing, }; use crate::VerifySignatures; +use derivative::Derivative; +use smallvec::{smallvec, SmallVec}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::marker::PhantomData; use types::{ - AttesterSlashing, BeaconState, ChainSpec, EthSpec, ProposerSlashing, SignedVoluntaryExit, + AttesterSlashing, BeaconState, ChainSpec, Epoch, EthSpec, Fork, ForkVersion, ProposerSlashing, + SignedVoluntaryExit, }; +const MAX_FORKS_VERIFIED_AGAINST: usize = 2; + /// Wrapper around an operation type that acts as proof that its signature has been checked. /// -/// The inner field is private, meaning instances of this type can only be constructed +/// The inner `op` field is private, meaning instances of this type can only be constructed /// by calling `validate`. -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct SigVerifiedOp<T>(T); +#[derive(Derivative, Debug, Clone, Encode, Decode)] +#[derivative( + PartialEq, + Eq, + Hash(bound = "T: Encode + Decode + std::hash::Hash, E: EthSpec") +)] +pub struct SigVerifiedOp<T: Encode + Decode, E: EthSpec> { + op: T, + verified_against: VerifiedAgainst, + #[ssz(skip_serializing, skip_deserializing)] + _phantom: PhantomData<E>, +} + +/// Information about the fork versions that this message was verified against. +/// +/// In general it is not safe to assume that a `SigVerifiedOp` constructed at some point in the past +/// will continue to be valid in the presence of a changing `state.fork()`. The reason for this +/// is that the fork versions that the message's epochs map to might change. 
+/// +/// For example a proposer slashing at a phase0 slot verified against an Altair state will use +/// the phase0 fork version, but will become invalid once the Bellatrix fork occurs because that +/// slot will start to map to the Altair fork version. This is because `Fork::get_fork_version` only +/// remembers the most recent two forks. +/// +/// In the other direction, a proposer slashing at a Bellatrix slot verified against an Altair state +/// will use the Altair fork version, but will become invalid once the Bellatrix fork occurs because +/// that slot will start to map to the Bellatrix fork version. +/// +/// We need to store multiple `ForkVersion`s because attester slashings contain two indexed +/// attestations which may be signed using different versions. +#[derive(Debug, PartialEq, Eq, Clone, Hash, Encode, Decode)] +pub struct VerifiedAgainst { + fork_versions: SmallVec<[ForkVersion; MAX_FORKS_VERIFIED_AGAINST]>, +} + +impl<T, E> SigVerifiedOp<T, E> +where + T: VerifyOperation<E>, + E: EthSpec, +{ + /// This function must be private because it assumes that `op` has already been verified. + fn new(op: T, state: &BeaconState<E>) -> Self { + let verified_against = VerifiedAgainst { + fork_versions: op + .verification_epochs() + .into_iter() + .map(|epoch| state.fork().get_fork_version(epoch)) + .collect(), + }; + + SigVerifiedOp { + op, + verified_against, + _phantom: PhantomData, + } + } -impl<T> SigVerifiedOp<T> { pub fn into_inner(self) -> T { - self.0 + self.op } pub fn as_inner(&self) -> &T { - &self.0 + &self.op + } + + pub fn signature_is_still_valid(&self, current_fork: &Fork) -> bool { + self.as_inner() + .verification_epochs() + .into_iter() + .zip(self.verified_against.fork_versions.iter()) + .all(|(epoch, verified_fork_version)| { + current_fork.get_fork_version(epoch) == *verified_fork_version + }) + } + + /// Return one of the fork versions this message was verified against. 
+ /// + /// This is only required for the v12 schema downgrade and can be deleted once all nodes + /// are upgraded to v12. + pub fn first_fork_verified_against(&self) -> Option<ForkVersion> { + self.verified_against.fork_versions.first().copied() } } /// Trait for operations that can be verified and transformed into a `SigVerifiedOp`. -pub trait VerifyOperation<E: EthSpec>: Sized { +pub trait VerifyOperation<E: EthSpec>: Encode + Decode + Sized { type Error; fn validate( self, state: &BeaconState<E>, spec: &ChainSpec, - ) -> Result<SigVerifiedOp<Self>, Self::Error>; + ) -> Result<SigVerifiedOp<Self, E>, Self::Error>; + + /// Return the epochs at which parts of this message were verified. + /// + /// These need to map 1-to-1 to the `SigVerifiedOp::verified_against` for this type. + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]>; } impl<E: EthSpec> VerifyOperation<E> for SignedVoluntaryExit { @@ -44,9 +128,14 @@ impl<E: EthSpec> VerifyOperation<E> for SignedVoluntaryExit { self, state: &BeaconState<E>, spec: &ChainSpec, - ) -> Result<SigVerifiedOp<Self>, Self::Error> { + ) -> Result<SigVerifiedOp<Self, E>, Self::Error> { verify_exit(state, &self, VerifySignatures::True, spec)?; - Ok(SigVerifiedOp(self)) + Ok(SigVerifiedOp::new(self, state)) + } + + #[allow(clippy::integer_arithmetic)] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + smallvec![self.message.epoch] } } @@ -57,9 +146,17 @@ impl<E: EthSpec> VerifyOperation<E> for AttesterSlashing<E> { self, state: &BeaconState<E>, spec: &ChainSpec, - ) -> Result<SigVerifiedOp<Self>, Self::Error> { + ) -> Result<SigVerifiedOp<Self, E>, Self::Error> { verify_attester_slashing(state, &self, VerifySignatures::True, spec)?; - Ok(SigVerifiedOp(self)) + Ok(SigVerifiedOp::new(self, state)) + } + + #[allow(clippy::integer_arithmetic)] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + smallvec![ + 
self.attestation_1.data.target.epoch, + self.attestation_2.data.target.epoch + ] } } @@ -70,8 +167,18 @@ impl<E: EthSpec> VerifyOperation<E> for ProposerSlashing { self, state: &BeaconState<E>, spec: &ChainSpec, - ) -> Result<SigVerifiedOp<Self>, Self::Error> { + ) -> Result<SigVerifiedOp<Self, E>, Self::Error> { verify_proposer_slashing(&self, state, VerifySignatures::True, spec)?; - Ok(SigVerifiedOp(self)) + Ok(SigVerifiedOp::new(self, state)) + } + + #[allow(clippy::integer_arithmetic)] + fn verification_epochs(&self) -> SmallVec<[Epoch; MAX_FORKS_VERIFIED_AGAINST]> { + // Only need a single epoch because the slots of the two headers must be equal. + smallvec![self + .signed_header_1 + .message + .slot + .epoch(E::slots_per_epoch())] } } diff --git a/consensus/tree_hash/examples/flamegraph_beacon_state.rs b/consensus/tree_hash/examples/flamegraph_beacon_state.rs index cb9fc9390a..e5b505bb91 100644 --- a/consensus/tree_hash/examples/flamegraph_beacon_state.rs +++ b/consensus/tree_hash/examples/flamegraph_beacon_state.rs @@ -17,7 +17,7 @@ fn get_harness<T: EthSpec>() -> BeaconChainHarness<EphemeralHarnessType<T>> { } fn build_state<T: EthSpec>() -> BeaconState<T> { - let state = get_harness::<T>().chain.head_beacon_state().unwrap(); + let state = get_harness::<T>().chain.head_beacon_state_cloned(); assert_eq!(state.as_base().unwrap().validators.len(), VALIDATOR_COUNT); assert_eq!(state.as_base().unwrap().balances.len(), VALIDATOR_COUNT); diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 6a156f9ae0..41e9127657 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -48,12 +48,15 @@ serde_json = "1.0.74" smallvec = "1.8.0" milhouse = { git = "https://github.com/sigp/milhouse", branch = "main" } rpds = "0.11.0" +serde_with = "1.13.0" +maplit = "1.0.2" [dev-dependencies] criterion = "0.3.3" beacon_chain = { path = "../../beacon_node/beacon_chain" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } 
state_processing = { path = "../state_processing" } +tokio = "1.14.0" [features] default = ["sqlite", "legacy-arith"] diff --git a/consensus/types/src/application_domain.rs b/consensus/types/src/application_domain.rs new file mode 100644 index 0000000000..5e33f2dfd5 --- /dev/null +++ b/consensus/types/src/application_domain.rs @@ -0,0 +1,16 @@ +/// This value is an application index of 0 with the bitmask applied (so it's equivalent to the bit mask). +/// Little endian hex: 0x00000001, Binary: 1000000000000000000000000 +pub const APPLICATION_DOMAIN_BUILDER: u32 = 16777216; + +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum ApplicationDomain { + Builder, +} + +impl ApplicationDomain { + pub fn get_domain_constant(&self) -> u32 { + match self { + ApplicationDomain::Builder => APPLICATION_DOMAIN_BUILDER, + } + } +} diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 1957c34eaa..70cf0812d4 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -38,7 +38,7 @@ use tree_hash_derive::TreeHash; derive(Debug, PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent") ), - map_ref_into(BeaconBlockBodyRef), + map_ref_into(BeaconBlockBodyRef, BeaconBlock), map_ref_mut_into(BeaconBlockBodyRefMut) )] #[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] @@ -541,6 +541,50 @@ impl_from!(BeaconBlockBase, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: impl_from!(BeaconBlockAltair, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyAltair<_, _>| body.into()); impl_from!(BeaconBlockMerge, <E, FullPayload<E>>, <E, BlindedPayload<E>>, |body: BeaconBlockBodyMerge<_, _>| body.into()); +// We can clone blocks with payloads to blocks without payloads, without cloning the payload. +macro_rules! 
impl_clone_as_blinded { + ($ty_name:ident, <$($from_params:ty),*>, <$($to_params:ty),*>) => { + impl<E: EthSpec> $ty_name<$($from_params),*> + { + pub fn clone_as_blinded(&self) -> $ty_name<$($to_params),*> { + let $ty_name { + slot, + proposer_index, + parent_root, + state_root, + body, + } = self; + + $ty_name { + slot: *slot, + proposer_index: *proposer_index, + parent_root: *parent_root, + state_root: *state_root, + body: body.clone_as_blinded(), + } + } + } + } +} + +impl_clone_as_blinded!(BeaconBlockBase, <E, FullPayload<E>>, <E, BlindedPayload<E>>); +impl_clone_as_blinded!(BeaconBlockAltair, <E, FullPayload<E>>, <E, BlindedPayload<E>>); +impl_clone_as_blinded!(BeaconBlockMerge, <E, FullPayload<E>>, <E, BlindedPayload<E>>); + +// A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the +// execution payload. +impl<'a, E: EthSpec> From<BeaconBlockRef<'a, E, FullPayload<E>>> + for BeaconBlock<E, BlindedPayload<E>> +{ + fn from( + full_block: BeaconBlockRef<'a, E, FullPayload<E>>, + ) -> BeaconBlock<E, BlindedPayload<E>> { + map_beacon_block_ref_into_beacon_block!(&'a _, full_block, |inner, cons| { + cons(inner.clone_as_blinded()) + }) + } +} + impl<E: EthSpec> From<BeaconBlock<E, FullPayload<E>>> for ( BeaconBlock<E, BlindedPayload<E>>, @@ -607,19 +651,17 @@ mod tests { #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; + let spec = E::default_spec(); let rng = &mut XorShiftRng::from_seed([42; 16]); - let fork_epoch = Epoch::from_ssz_bytes(&[7, 6, 5, 4, 3, 2, 1, 0]).unwrap(); + let fork_epoch = spec.altair_fork_epoch.unwrap(); let base_epoch = fork_epoch.saturating_sub(1_u64); let base_slot = base_epoch.end_slot(E::slots_per_epoch()); let altair_epoch = fork_epoch; let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(fork_epoch); - // BeaconBlockBase { let good_base_block = BeaconBlock::Base(BeaconBlockBase { diff --git 
a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 34761ea9a7..381a9bd43e 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -251,6 +251,53 @@ impl<E: EthSpec> From<BeaconBlockBodyMerge<E, FullPayload<E>>> } } +// We can clone a full block into a blinded block, without cloning the payload. +impl<E: EthSpec> BeaconBlockBodyBase<E, FullPayload<E>> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase<E, BlindedPayload<E>> { + let (block_body, _payload) = self.clone().into(); + block_body + } +} + +impl<E: EthSpec> BeaconBlockBodyAltair<E, FullPayload<E>> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyAltair<E, BlindedPayload<E>> { + let (block_body, _payload) = self.clone().into(); + block_body + } +} + +impl<E: EthSpec> BeaconBlockBodyMerge<E, FullPayload<E>> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyMerge<E, BlindedPayload<E>> { + let BeaconBlockBodyMerge { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayload { execution_payload }, + } = self; + + BeaconBlockBodyMerge { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayload { + execution_payload_header: From::from(execution_payload), + }, + } + } +} + impl<E: EthSpec> From<BeaconBlockBody<E, FullPayload<E>>> for ( BeaconBlockBody<E, BlindedPayload<E>>, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 1e4643b13a..4b5ffbca61 100644 --- a/consensus/types/src/beacon_state.rs +++ 
b/consensus/types/src/beacon_state.rs @@ -985,6 +985,13 @@ impl<T: EthSpec> BeaconState<T> { } } + /// Return the minimum epoch for which `get_randao_mix` will return a non-error value. + pub fn min_randao_epoch(&self) -> Epoch { + self.current_epoch() + .saturating_add(1u64) + .saturating_sub(T::EpochsPerHistoricalVector::to_u64()) + } + /// XOR-assigns the existing `epoch` randao mix with the hash of the `signature`. /// /// # Errors: @@ -1625,15 +1632,17 @@ impl<T: EthSpec> BeaconState<T> { Ok(self.validators().tree_hash_root()) } - pub fn is_eligible_validator(&self, val: &Validator) -> bool { - let previous_epoch = self.previous_epoch(); + /// Passing `previous_epoch` to this function rather than computing it internally provides + /// a tangible speed improvement in state processing. + pub fn is_eligible_validator(&self, previous_epoch: Epoch, val: &Validator) -> bool { val.is_active_at(previous_epoch) || (val.slashed && previous_epoch + Epoch::new(1) < val.withdrawable_epoch) } - pub fn is_in_inactivity_leak(&self, spec: &ChainSpec) -> bool { - (self.previous_epoch() - self.finalized_checkpoint().epoch) - > spec.min_epochs_to_inactivity_penalty + /// Passing `previous_epoch` to this function rather than computing it internally provides + /// a tangible speed improvement in state processing. + pub fn is_in_inactivity_leak(&self, previous_epoch: Epoch, spec: &ChainSpec) -> bool { + (previous_epoch - self.finalized_checkpoint().epoch) > spec.min_epochs_to_inactivity_penalty } /// Get the `SyncCommittee` associated with the next slot. 
Useful because sync committees diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 19009310e1..4c613c3f54 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -39,8 +39,18 @@ impl CommitteeCache { epoch: Epoch, spec: &ChainSpec, ) -> Result<Arc<CommitteeCache>, Error> { - RelativeEpoch::from_epoch(state.current_epoch(), epoch) - .map_err(|_| Error::EpochOutOfBounds)?; + // Check that the cache is being built for an in-range epoch. + // + // We allow caches to be constructed for historic epochs, per: + // + // https://github.com/sigp/lighthouse/issues/3270 + let reqd_randao_epoch = epoch + .saturating_sub(spec.min_seed_lookahead) + .saturating_sub(1u64); + + if reqd_randao_epoch < state.min_randao_epoch() || epoch > state.current_epoch() + 1 { + return Err(Error::EpochOutOfBounds); + } // May cause divide-by-zero errors. if T::slots_per_epoch() == 0 { diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index 8c46397c04..eea6233e35 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -34,32 +34,34 @@ fn default_values() { assert!(cache.get_beacon_committees_at_slot(Slot::new(0)).is_err()); } -fn new_state<T: EthSpec>(validator_count: usize, slot: Slot) -> BeaconState<T> { +async fn new_state<T: EthSpec>(validator_count: usize, slot: Slot) -> BeaconState<T> { let harness = get_harness(validator_count); let head_state = harness.get_current_state(); if slot > Slot::new(0) { - harness.add_attested_blocks_at_slots( - head_state, - Hash256::zero(), - (1..slot.as_u64()) - .map(Slot::new) - .collect::<Vec<_>>() - .as_slice(), - (0..validator_count).collect::<Vec<_>>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + head_state, + Hash256::zero(), + 
(1..=slot.as_u64()) + .map(Slot::new) + .collect::<Vec<_>>() + .as_slice(), + (0..validator_count).collect::<Vec<_>>().as_slice(), + ) + .await; } harness.get_current_state() } -#[test] +#[tokio::test] #[should_panic] -fn fails_without_validators() { - new_state::<MinimalEthSpec>(0, Slot::new(0)); +async fn fails_without_validators() { + new_state::<MinimalEthSpec>(0, Slot::new(0)).await; } -#[test] -fn initializes_with_the_right_epoch() { - let state = new_state::<MinimalEthSpec>(16, Slot::new(0)); +#[tokio::test] +async fn initializes_with_the_right_epoch() { + let state = new_state::<MinimalEthSpec>(16, Slot::new(0)).await; let spec = &MinimalEthSpec::default_spec(); let cache = CommitteeCache::default(); @@ -75,17 +77,20 @@ fn initializes_with_the_right_epoch() { assert!(cache.is_initialized_at(state.next_epoch().unwrap())); } -#[test] -fn shuffles_for_the_right_epoch() { +#[tokio::test] +async fn shuffles_for_the_right_epoch() { let num_validators = MinimalEthSpec::minimum_validator_count() * 2; let epoch = Epoch::new(6); let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch()); - let mut state = new_state::<MinimalEthSpec>(num_validators, slot); + let mut state = new_state::<MinimalEthSpec>(num_validators, slot).await; let spec = &MinimalEthSpec::default_spec(); - let distinct_hashes = (0..MinimalEthSpec::epochs_per_historical_vector()) - .map(|i| Hash256::from_low_u64_be(i as u64)); + assert_eq!(state.current_epoch(), epoch); + + let distinct_hashes: Vec<Hash256> = (0..MinimalEthSpec::epochs_per_historical_vector()) + .map(|i| Hash256::from_low_u64_be(i as u64)) + .collect(); *state.randao_mixes_mut() = FixedVector::try_from_iter(distinct_hashes).unwrap(); @@ -121,15 +126,41 @@ fn shuffles_for_the_right_epoch() { } }; - let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling(), shuffling_with_seed(current_seed)); - assert_shuffling_positions_accurate(&cache); + // We can initialize the 
committee cache at recent epochs in the past, and one epoch into the + // future. + for e in (0..=epoch.as_u64() + 1).map(Epoch::new) { + let seed = state.get_seed(e, Domain::BeaconAttester, spec).unwrap(); + let cache = CommitteeCache::initialized(&state, e, spec) + .unwrap_or_else(|_| panic!("failed at epoch {}", e)); + assert_eq!(cache.shuffling(), shuffling_with_seed(seed)); + assert_shuffling_positions_accurate(&cache); + } - let cache = CommitteeCache::initialized(&state, state.previous_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling(), shuffling_with_seed(previous_seed)); - assert_shuffling_positions_accurate(&cache); - - let cache = CommitteeCache::initialized(&state, state.next_epoch().unwrap(), spec).unwrap(); - assert_eq!(cache.shuffling(), shuffling_with_seed(next_seed)); - assert_shuffling_positions_accurate(&cache); + // We should *not* be able to build a committee cache for the epoch after the next epoch. + assert_eq!( + CommitteeCache::initialized(&state, epoch + 2, spec), + Err(BeaconStateError::EpochOutOfBounds) + ); +} + +#[tokio::test] +async fn min_randao_epoch_correct() { + let num_validators = MinimalEthSpec::minimum_validator_count() * 2; + let current_epoch = Epoch::new(MinimalEthSpec::epochs_per_historical_vector() as u64 * 2); + + let mut state = new_state::<MinimalEthSpec>( + num_validators, + Epoch::new(1).start_slot(MinimalEthSpec::slots_per_epoch()), + ) + .await; + + // Override the epoch so that there's some room to move. + *state.slot_mut() = current_epoch.start_slot(MinimalEthSpec::slots_per_epoch()); + assert_eq!(state.current_epoch(), current_epoch); + + // The min_randao_epoch should be the minimum epoch such that `get_randao_mix` returns `Ok`. 
+ let min_randao_epoch = state.min_randao_epoch(); + state.get_randao_mix(min_randao_epoch).unwrap(); + state.get_randao_mix(min_randao_epoch - 1).unwrap_err(); + state.get_randao_mix(min_randao_epoch + 1).unwrap(); } diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index f47b4349bc..38b9d34c2f 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -7,7 +7,9 @@ use beacon_chain::types::{ ChainSpec, Domain, Epoch, EthSpec, FixedVector, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, }; +use safe_arith::SafeArith; use ssz::{Decode, Encode}; +use state_processing::per_slot_processing; use std::ops::Mul; use swap_or_not_shuffle::compute_shuffled_index; use tree_hash::TreeHash; @@ -20,7 +22,7 @@ lazy_static! { static ref KEYPAIRS: Vec<Keypair> = generate_deterministic_keypairs(MAX_VALIDATOR_COUNT); } -fn get_harness<E: EthSpec>( +async fn get_harness<E: EthSpec>( validator_count: usize, slot: Slot, ) -> BeaconChainHarness<EphemeralHarnessType<E>> { @@ -36,24 +38,26 @@ fn get_harness<E: EthSpec>( .map(Slot::new) .collect::<Vec<_>>(); let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - slots.as_slice(), - (0..validator_count).collect::<Vec<_>>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + slots.as_slice(), + (0..validator_count).collect::<Vec<_>>().as_slice(), + ) + .await; } harness } -fn build_state<E: EthSpec>(validator_count: usize) -> BeaconState<E> { +async fn build_state<E: EthSpec>(validator_count: usize) -> BeaconState<E> { get_harness(validator_count, Slot::new(0)) + .await .chain - .head_beacon_state() - .unwrap() + .head_beacon_state_cloned() } -fn test_beacon_proposer_index<T: EthSpec>() { +async fn test_beacon_proposer_index<T: EthSpec>() { let spec = T::default_spec(); // Get the i'th candidate proposer for the given state and slot 
@@ -80,20 +84,20 @@ fn test_beacon_proposer_index<T: EthSpec>() { // Test where we have one validator per slot. // 0th candidate should be chosen every time. - let state = build_state(T::slots_per_epoch() as usize); + let state = build_state(T::slots_per_epoch() as usize).await; for i in 0..T::slots_per_epoch() { test(&state, Slot::from(i), 0); } // Test where we have two validators per slot. // 0th candidate should be chosen every time. - let state = build_state((T::slots_per_epoch() as usize).mul(2)); + let state = build_state((T::slots_per_epoch() as usize).mul(2)).await; for i in 0..T::slots_per_epoch() { test(&state, Slot::from(i), 0); } // Test with two validators per slot, first validator has zero balance. - let mut state = build_state::<T>((T::slots_per_epoch() as usize).mul(2)); + let mut state = build_state::<T>((T::slots_per_epoch() as usize).mul(2)).await; let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec); state .validators_mut() @@ -106,9 +110,9 @@ fn test_beacon_proposer_index<T: EthSpec>() { } } -#[test] -fn beacon_proposer_index() { - test_beacon_proposer_index::<MinimalEthSpec>(); +#[tokio::test] +async fn beacon_proposer_index() { + test_beacon_proposer_index::<MinimalEthSpec>().await; } /// Test that @@ -143,11 +147,11 @@ fn test_cache_initialization<T: EthSpec>( ); } -#[test] -fn cache_initialization() { +#[tokio::test] +async fn cache_initialization() { let spec = MinimalEthSpec::default_spec(); - let mut state = build_state::<MinimalEthSpec>(16); + let mut state = build_state::<MinimalEthSpec>(16).await; *state.slot_mut() = (MinimalEthSpec::genesis_epoch() + 1).start_slot(MinimalEthSpec::slots_per_epoch()); @@ -236,7 +240,7 @@ mod committees { assert!(expected_indices_iter.next().is_none()); } - fn committee_consistency_test<T: EthSpec>( + async fn committee_consistency_test<T: EthSpec>( validator_count: usize, state_epoch: Epoch, cache_epoch: RelativeEpoch, @@ -244,7 +248,7 @@ mod committees { let spec = &T::default_spec(); 
let slot = state_epoch.start_slot(T::slots_per_epoch()); - let harness = get_harness::<T>(validator_count, slot); + let harness = get_harness::<T>(validator_count, slot).await; let mut new_head_state = harness.get_current_state(); let distinct_hashes = @@ -271,7 +275,7 @@ mod committees { ); } - fn committee_consistency_test_suite<T: EthSpec>(cached_epoch: RelativeEpoch) { + async fn committee_consistency_test_suite<T: EthSpec>(cached_epoch: RelativeEpoch) { let spec = T::default_spec(); let validator_count = spec @@ -280,13 +284,15 @@ mod committees { .mul(spec.target_committee_size) .add(1); - committee_consistency_test::<T>(validator_count as usize, Epoch::new(0), cached_epoch); + committee_consistency_test::<T>(validator_count as usize, Epoch::new(0), cached_epoch) + .await; committee_consistency_test::<T>( validator_count as usize, T::genesis_epoch() + 4, cached_epoch, - ); + ) + .await; committee_consistency_test::<T>( validator_count as usize, @@ -295,38 +301,39 @@ mod committees { .mul(T::slots_per_epoch()) .mul(4), cached_epoch, - ); + ) + .await; } - #[test] - fn current_epoch_committee_consistency() { - committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Current); + #[tokio::test] + async fn current_epoch_committee_consistency() { + committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Current).await; } - #[test] - fn previous_epoch_committee_consistency() { - committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Previous); + #[tokio::test] + async fn previous_epoch_committee_consistency() { + committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Previous).await; } - #[test] - fn next_epoch_committee_consistency() { - committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Next); + #[tokio::test] + async fn next_epoch_committee_consistency() { + committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Next).await; } } mod get_outstanding_deposit_len { use super::*; - fn state() -> 
BeaconState<MinimalEthSpec> { + async fn state() -> BeaconState<MinimalEthSpec> { get_harness(16, Slot::new(0)) + .await .chain - .head_beacon_state() - .unwrap() + .head_beacon_state_cloned() } - #[test] - fn returns_ok() { - let mut state = state(); + #[tokio::test] + async fn returns_ok() { + let mut state = state().await; assert_eq!(state.get_outstanding_deposit_len(), Ok(0)); state.eth1_data_mut().deposit_count = 17; @@ -334,9 +341,9 @@ mod get_outstanding_deposit_len { assert_eq!(state.get_outstanding_deposit_len(), Ok(1)); } - #[test] - fn returns_err_if_the_state_is_invalid() { - let mut state = state(); + #[tokio::test] + async fn returns_err_if_the_state_is_invalid() { + let mut state = state().await; // The state is invalid, deposit count is lower than deposit index. state.eth1_data_mut().deposit_count = 16; *state.eth1_deposit_index_mut() = 17; @@ -354,62 +361,60 @@ mod get_outstanding_deposit_len { #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; + let spec = E::default_spec(); let rng = &mut XorShiftRng::from_seed([42; 16]); - let fork_epoch = Epoch::from_ssz_bytes(&[7, 6, 5, 4, 3, 2, 1, 0]).unwrap(); + let fork_epoch = spec.altair_fork_epoch.unwrap(); let base_epoch = fork_epoch.saturating_sub(1_u64); let base_slot = base_epoch.end_slot(E::slots_per_epoch()); let altair_epoch = fork_epoch; let altair_slot = altair_epoch.start_slot(E::slots_per_epoch()); - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(altair_epoch); - // BeaconStateBase { - let good_base_block: BeaconState<MainnetEthSpec> = BeaconState::Base(BeaconStateBase { + let good_base_state: BeaconState<MainnetEthSpec> = BeaconState::Base(BeaconStateBase { slot: base_slot, ..<_>::random_for_test(rng) }); - // It's invalid to have a base block with a slot higher than the fork slot. - let bad_base_block = { - let mut bad = good_base_block.clone(); + // It's invalid to have a base state with a slot higher than the fork slot. 
+ let bad_base_state = { + let mut bad = good_base_state.clone(); *bad.slot_mut() = altair_slot; bad }; assert_eq!( - BeaconState::from_ssz_bytes(&good_base_block.as_ssz_bytes(), &spec) - .expect("good base block can be decoded"), - good_base_block + BeaconState::from_ssz_bytes(&good_base_state.as_ssz_bytes(), &spec) + .expect("good base state can be decoded"), + good_base_state ); - <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_base_block.as_ssz_bytes(), &spec) - .expect_err("bad base block cannot be decoded"); + <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_base_state.as_ssz_bytes(), &spec) + .expect_err("bad base state cannot be decoded"); } // BeaconStateAltair { - let good_altair_block: BeaconState<MainnetEthSpec> = + let good_altair_state: BeaconState<MainnetEthSpec> = BeaconState::Altair(BeaconStateAltair { slot: altair_slot, ..<_>::random_for_test(rng) }); - // It's invalid to have an Altair block with a slot lower than the fork slot. - let bad_altair_block = { - let mut bad = good_altair_block.clone(); + // It's invalid to have an Altair state with a slot lower than the fork slot. 
+ let bad_altair_state = { + let mut bad = good_altair_state.clone(); *bad.slot_mut() = base_slot; bad }; assert_eq!( - BeaconState::from_ssz_bytes(&good_altair_block.as_ssz_bytes(), &spec) - .expect("good altair block can be decoded"), - good_altair_block + BeaconState::from_ssz_bytes(&good_altair_state.as_ssz_bytes(), &spec) + .expect("good altair state can be decoded"), + good_altair_state ); - <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_altair_block.as_ssz_bytes(), &spec) - .expect_err("bad altair block cannot be decoded"); + <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_altair_state.as_ssz_bytes(), &spec) + .expect_err("bad altair state cannot be decoded"); } } diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs new file mode 100644 index 0000000000..047bceae7e --- /dev/null +++ b/consensus/types/src/builder_bid.rs @@ -0,0 +1,70 @@ +use crate::{ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, Uint256}; +use bls::PublicKeyBytes; +use bls::Signature; +use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; +use serde_derive::{Deserialize, Serialize}; +use serde_with::{serde_as, DeserializeAs, SerializeAs}; +use std::marker::PhantomData; +use tree_hash_derive::TreeHash; + +#[serde_as] +#[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] +#[serde(bound = "E: EthSpec, Payload: ExecPayload<E>")] +pub struct BuilderBid<E: EthSpec, Payload: ExecPayload<E>> { + #[serde_as(as = "BlindedPayloadAsHeader<E>")] + pub header: Payload, + #[serde(with = "eth2_serde_utils::quoted_u256")] + pub value: Uint256, + pub pubkey: PublicKeyBytes, + #[serde(skip)] + #[tree_hash(skip_hashing)] + _phantom_data: PhantomData<E>, +} + +impl<E: EthSpec, Payload: ExecPayload<E>> SignedRoot for BuilderBid<E, Payload> {} + +/// A signed builder bid, for use in interacting with servers implementing the builder API.
+#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[serde(bound = "E: EthSpec, Payload: ExecPayload<E>")] +pub struct SignedBuilderBid<E: EthSpec, Payload: ExecPayload<E>> { + pub message: BuilderBid<E, Payload>, + pub signature: Signature, +} + +struct BlindedPayloadAsHeader<E>(PhantomData<E>); + +impl<E: EthSpec, Payload: ExecPayload<E>> SerializeAs<Payload> for BlindedPayloadAsHeader<E> { + fn serialize_as<S>(source: &Payload, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + source.to_execution_payload_header().serialize(serializer) + } +} + +impl<'de, E: EthSpec, Payload: ExecPayload<E>> DeserializeAs<'de, Payload> + for BlindedPayloadAsHeader<E> +{ + fn deserialize_as<D>(deserializer: D) -> Result<Payload, D::Error> + where + D: Deserializer<'de>, + { + let payload_header = ExecutionPayloadHeader::deserialize(deserializer)?; + Payload::try_from(payload_header) + .map_err(|_| serde::de::Error::custom("unable to convert payload header to payload")) + } +} + +impl<E: EthSpec, Payload: ExecPayload<E>> SignedBuilderBid<E, Payload> { + pub fn verify_signature(&self, spec: &ChainSpec) -> bool { + self.message + .pubkey + .decompress() + .map(|pubkey| { + let domain = spec.get_builder_domain(); + let message = self.message.signing_root(domain); + self.signature.verify(&pubkey, message) + }) + .unwrap_or(false) + } +} diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 13f431abcd..4ed12dfbd7 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,3 +1,4 @@ +use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::*; use eth2_serde_utils::quoted_u64::MaybeQuoted; use int_to_bytes::int_to_bytes4; @@ -20,6 +21,7 @@ pub enum Domain { SyncCommittee, ContributionAndProof, SyncCommitteeSelectionProof, + ApplicationMask(ApplicationDomain), } /// Lighthouse's internal configuration struct. 
@@ -159,6 +161,11 @@ pub struct ChainSpec { pub attestation_subnet_count: u64, pub random_subnets_per_validator: u64, pub epochs_per_random_subnet_subscription: u64, + + /* + * Application params + */ + pub(crate) domain_application_mask: u32, } impl ChainSpec { @@ -333,6 +340,7 @@ impl ChainSpec { Domain::SyncCommittee => self.domain_sync_committee, Domain::ContributionAndProof => self.domain_contribution_and_proof, Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, + Domain::ApplicationMask(application_domain) => application_domain.get_domain_constant(), } } @@ -360,6 +368,17 @@ impl ChainSpec { self.compute_domain(Domain::Deposit, self.genesis_fork_version, Hash256::zero()) } + // This should be updated to include the current fork and the genesis validators root, but discussion is ongoing: + // + // https://github.com/ethereum/builder-specs/issues/14 + pub fn get_builder_domain(&self) -> Hash256 { + self.compute_domain( + Domain::ApplicationMask(ApplicationDomain::Builder), + self.genesis_fork_version, + Hash256::zero(), + ) + } + /// Return the 32-byte fork data root for the `current_version` and `genesis_validators_root`. /// /// This is used primarily in signature domains to avoid collisions across forks/chains. @@ -549,14 +568,9 @@ impl ChainSpec { .expect("pow does not overflow"), proportional_slashing_multiplier_bellatrix: 3, bellatrix_fork_version: [0x02, 0x00, 0x00, 0x00], - bellatrix_fork_epoch: None, - terminal_total_difficulty: Uint256::MAX - .checked_sub(Uint256::from(2u64.pow(10))) - .expect("subtraction does not overflow") - // Add 1 since the spec declares `2**256 - 2**10` and we use - // `Uint256::MAX` which is `2*256- 1`. 
- .checked_add(Uint256::one()) - .expect("addition does not overflow"), + bellatrix_fork_epoch: Some(Epoch::new(144896)), + terminal_total_difficulty: Uint256::from_dec_str("58750000000000000000000") + .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), safe_slots_to_import_optimistically: 128u64, @@ -572,6 +586,11 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + + /* + * Application specific + */ + domain_application_mask: APPLICATION_DOMAIN_BUILDER, } } @@ -604,6 +623,13 @@ impl ChainSpec { // Merge bellatrix_fork_version: [0x02, 0x00, 0x00, 0x01], bellatrix_fork_epoch: None, + terminal_total_difficulty: Uint256::MAX + .checked_sub(Uint256::from(2u64.pow(10))) + .expect("subtraction does not overflow") + // Add 1 since the spec declares `2**256 - 2**10` and we use + // `Uint256::MAX` which is `2**256 - 1` + .checked_add(Uint256::one()) + .expect("addition does not overflow"), // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -770,6 +796,11 @@ impl ChainSpec { maximum_gossip_clock_disparity_millis: 500, target_aggregators_per_committee: 16, epochs_per_random_subnet_subscription: 256, + + /* + * Application specific + */ + domain_application_mask: APPLICATION_DOMAIN_BUILDER, } } } @@ -781,6 +812,10 @@ impl Default for ChainSpec { } /// Exact implementation of the *config* object from the Ethereum spec (YAML/JSON). +/// +/// Fields relevant to hard forks after Altair should be optional so that we can continue +/// to parse Altair configs. This default approach turns out to be much simpler than trying to +/// make `Config` a superstruct because of the hassle of deserializing an untagged enum.
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] #[serde(rename_all = "UPPERCASE")] pub struct Config { @@ -791,17 +826,13 @@ pub struct Config { #[serde(default)] pub preset_base: String, - // TODO(merge): remove this default #[serde(default = "default_terminal_total_difficulty")] #[serde(with = "eth2_serde_utils::quoted_u256")] pub terminal_total_difficulty: Uint256, - // TODO(merge): remove this default #[serde(default = "default_terminal_block_hash")] pub terminal_block_hash: ExecutionBlockHash, - // TODO(merge): remove this default #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, - // TODO(merge): remove this default #[serde(default = "default_safe_slots_to_import_optimistically")] #[serde(with = "eth2_serde_utils::quoted_u64")] pub safe_slots_to_import_optimistically: u64, @@ -821,12 +852,10 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub altair_fork_epoch: Option<MaybeQuoted<Epoch>>, - // TODO(merge): remove this default #[serde(default = "default_bellatrix_fork_version")] #[serde(with = "eth2_serde_utils::bytes_4_hex")] bellatrix_fork_version: [u8; 4], - // TODO(merge): remove this default - #[serde(default = "default_bellatrix_fork_epoch")] + #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] pub bellatrix_fork_epoch: Option<MaybeQuoted<Epoch>>, @@ -868,10 +897,6 @@ fn default_bellatrix_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } -fn default_bellatrix_fork_epoch() -> Option<MaybeQuoted<Epoch>> { - None -} - /// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). 
/// /// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 @@ -1126,6 +1151,27 @@ mod tests { &spec, ); test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); + + // The builder domain index is zero + let builder_domain_pre_mask = [0; 4]; + test_domain( + Domain::ApplicationMask(ApplicationDomain::Builder), + apply_bit_mask(builder_domain_pre_mask, &spec), + &spec, + ); + } + + fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 { + let mut domain = [0; 4]; + let mask_bytes = int_to_bytes4(spec.domain_application_mask); + + // Apply application bit mask + for (i, (domain_byte, mask_byte)) in domain_bytes.iter().zip(mask_bytes.iter()).enumerate() + { + domain[i] = domain_byte | mask_byte; + } + + u32::from_le_bytes(domain) } // Test that `fork_name_at_epoch` and `fork_epoch` are consistent. @@ -1292,10 +1338,7 @@ mod yaml_tests { default_safe_slots_to_import_optimistically() ); - assert_eq!( - chain_spec.bellatrix_fork_epoch, - default_bellatrix_fork_epoch() - ); + assert_eq!(chain_spec.bellatrix_fork_epoch, None); assert_eq!( chain_spec.bellatrix_fork_version, @@ -1312,4 +1355,12 @@ mod yaml_tests { ) ); } + + #[test] + fn test_domain_builder() { + assert_eq!( + int_to_bytes4(ApplicationDomain::Builder.get_domain_constant()), + [0, 0, 0, 1] + ); + } } diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index f721e6c3bb..e624afe2db 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,12 +1,21 @@ -use crate::{AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec}; +use crate::{ + consts::altair, AltairPreset, BasePreset, BellatrixPreset, ChainSpec, Config, EthSpec, ForkName, +}; +use maplit::hashmap; use serde_derive::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; +use superstruct::superstruct; /// Fusion of a 
runtime-config with the compile-time preset values. /// /// Mostly useful for the API. +#[superstruct( + variants(Altair, Bellatrix), + variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) +)] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +#[serde(untagged)] pub struct ConfigAndPreset { #[serde(flatten)] pub config: Config, @@ -15,76 +24,75 @@ pub struct ConfigAndPreset { pub base_preset: BasePreset, #[serde(flatten)] pub altair_preset: AltairPreset, - // TODO(merge): re-enable - // #[serde(flatten)] - // pub bellatrix_preset: BellatrixPreset, + #[superstruct(only(Bellatrix))] + #[serde(flatten)] + pub bellatrix_preset: BellatrixPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. #[serde(flatten)] pub extra_fields: HashMap<String, Value>, } impl ConfigAndPreset { - pub fn from_chain_spec<T: EthSpec>(spec: &ChainSpec) -> Self { + pub fn from_chain_spec<T: EthSpec>(spec: &ChainSpec, fork_name: Option<ForkName>) -> Self { let config = Config::from_chain_spec::<T>(spec); let base_preset = BasePreset::from_chain_spec::<T>(spec); let altair_preset = AltairPreset::from_chain_spec::<T>(spec); - // TODO(merge): re-enable - let _bellatrix_preset = BellatrixPreset::from_chain_spec::<T>(spec); - let extra_fields = HashMap::new(); + let extra_fields = get_extra_fields(spec); - Self { - config, - base_preset, - altair_preset, - extra_fields, + if spec.bellatrix_fork_epoch.is_some() + || fork_name == None + || fork_name == Some(ForkName::Merge) + { + let bellatrix_preset = BellatrixPreset::from_chain_spec::<T>(spec); + + ConfigAndPreset::Bellatrix(ConfigAndPresetBellatrix { + config, + base_preset, + altair_preset, + bellatrix_preset, + extra_fields, + }) + } else { + ConfigAndPreset::Altair(ConfigAndPresetAltair { + config, + base_preset, + altair_preset, + extra_fields, + }) } } +} - /// Add fields that were previously part of the config but are now constants. 
- pub fn make_backwards_compat(&mut self, spec: &ChainSpec) { - let hex_string = |value: &[u8]| format!("0x{}", hex::encode(&value)); - let u32_hex = |v: u32| hex_string(&v.to_le_bytes()); - let u8_hex = |v: u8| hex_string(&v.to_le_bytes()); - let fields = vec![ - ( - "bls_withdrawal_prefix", - u8_hex(spec.bls_withdrawal_prefix_byte), - ), - ( - "domain_beacon_proposer", - u32_hex(spec.domain_beacon_proposer), - ), - ( - "domain_beacon_attester", - u32_hex(spec.domain_beacon_attester), - ), - ("domain_randao", u32_hex(spec.domain_randao)), - ("domain_deposit", u32_hex(spec.domain_deposit)), - ("domain_voluntary_exit", u32_hex(spec.domain_voluntary_exit)), - ( - "domain_selection_proof", - u32_hex(spec.domain_selection_proof), - ), - ( - "domain_aggregate_and_proof", - u32_hex(spec.domain_aggregate_and_proof), - ), - ( - "target_aggregators_per_committee", - spec.target_aggregators_per_committee.to_string(), - ), - ( - "random_subnets_per_validator", - spec.random_subnets_per_validator.to_string(), - ), - ( - "epochs_per_random_subnet_subscription", - spec.epochs_per_random_subnet_subscription.to_string(), - ), - ]; - for (key, value) in fields { - self.extra_fields.insert(key.to_uppercase(), value.into()); - } +/// Get a hashmap of constants to add to the `ConfigAndPreset` +pub fn get_extra_fields(spec: &ChainSpec) -> HashMap<String, Value> { + let hex_string = |value: &[u8]| format!("0x{}", hex::encode(&value)).into(); + let u32_hex = |v: u32| hex_string(&v.to_le_bytes()); + let u8_hex = |v: u8| hex_string(&v.to_le_bytes()); + hashmap!
{ + "bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte), + "domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer), + "domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester), + "domain_randao".to_uppercase()=> u32_hex(spec.domain_randao), + "domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit), + "domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit), + "domain_selection_proof".to_uppercase() => u32_hex(spec.domain_selection_proof), + "domain_aggregate_and_proof".to_uppercase() => u32_hex(spec.domain_aggregate_and_proof), + "domain_application_mask".to_uppercase()=> u32_hex(spec.domain_application_mask), + "target_aggregators_per_committee".to_uppercase() => + spec.target_aggregators_per_committee.to_string().into(), + "random_subnets_per_validator".to_uppercase() => + spec.random_subnets_per_validator.to_string().into(), + "epochs_per_random_subnet_subscription".to_uppercase() => + spec.epochs_per_random_subnet_subscription.to_string().into(), + "domain_contribution_and_proof".to_uppercase() => + u32_hex(spec.domain_contribution_and_proof), + "domain_sync_committee".to_uppercase() => u32_hex(spec.domain_sync_committee), + "domain_sync_committee_selection_proof".to_uppercase() => + u32_hex(spec.domain_sync_committee_selection_proof), + "sync_committee_subnet_count".to_uppercase() => + altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), + "target_aggregators_per_sync_subcommittee".to_uppercase() => + altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), } } @@ -104,15 +112,16 @@ mod test { .open(tmp_file.as_ref()) .expect("error opening file"); let mainnet_spec = ChainSpec::mainnet(); - let mut yamlconfig = ConfigAndPreset::from_chain_spec::<MainnetEthSpec>(&mainnet_spec); + let mut yamlconfig = + ConfigAndPreset::from_chain_spec::<MainnetEthSpec>(&mainnet_spec, None); let (k1, v1) = ("SAMPLE_HARDFORK_KEY1", "123456789"); let (k2, v2) = 
("SAMPLE_HARDFORK_KEY2", "987654321"); let (k3, v3) = ("SAMPLE_HARDFORK_KEY3", 32); let (k4, v4) = ("SAMPLE_HARDFORK_KEY4", Value::Null); - yamlconfig.extra_fields.insert(k1.into(), v1.into()); - yamlconfig.extra_fields.insert(k2.into(), v2.into()); - yamlconfig.extra_fields.insert(k3.into(), v3.into()); - yamlconfig.extra_fields.insert(k4.into(), v4); + yamlconfig.extra_fields_mut().insert(k1.into(), v1.into()); + yamlconfig.extra_fields_mut().insert(k2.into(), v2.into()); + yamlconfig.extra_fields_mut().insert(k3.into(), v3.into()); + yamlconfig.extra_fields_mut().insert(k4.into(), v4); serde_yaml::to_writer(writer, &yamlconfig).expect("failed to write or serialize"); @@ -121,8 +130,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPreset = + let from: ConfigAndPresetBellatrix = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(from, yamlconfig); + assert_eq!(ConfigAndPreset::Bellatrix(from), yamlconfig); } } diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index 0a10c4daa4..988dcece5e 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -1,12 +1,14 @@ use crate::test_utils::TestRandom; use crate::Hash256; +use derivative::Derivative; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] +#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] +#[derivative(Debug = "transparent")] #[serde(transparent)] pub struct ExecutionBlockHash(Hash256); diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 4a2e762087..e97b08309b 100644 --- a/consensus/types/src/fork_name.rs +++ 
b/consensus/types/src/fork_name.rs @@ -106,14 +106,14 @@ macro_rules! map_fork_name_with { } impl FromStr for ForkName { - type Err = (); + type Err = String; - fn from_str(fork_name: &str) -> Result<Self, ()> { + fn from_str(fork_name: &str) -> Result<Self, String> { Ok(match fork_name.to_lowercase().as_ref() { "phase0" | "base" => ForkName::Base, "altair" => ForkName::Altair, "bellatrix" | "merge" => ForkName::Merge, - _ => return Err(()), + _ => return Err(format!("unknown fork name: {}", fork_name)), }) } } @@ -138,7 +138,7 @@ impl TryFrom<String> for ForkName { type Error = String; fn try_from(s: String) -> Result<Self, Self::Error> { - Self::from_str(&s).map_err(|()| format!("Invalid fork name: {}", s)) + Self::from_str(&s) } } @@ -178,8 +178,8 @@ mod test { assert_eq!(ForkName::from_str("AlTaIr"), Ok(ForkName::Altair)); assert_eq!(ForkName::from_str("altair"), Ok(ForkName::Altair)); - assert_eq!(ForkName::from_str("NO_NAME"), Err(())); - assert_eq!(ForkName::from_str("no_name"), Err(())); + assert!(ForkName::from_str("NO_NAME").is_err()); + assert!(ForkName::from_str("no_name").is_err()); } #[test] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 1cdee71341..13f1fbd621 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -18,6 +18,7 @@ extern crate lazy_static; pub mod test_utils; pub mod aggregate_and_proof; +pub mod application_domain; pub mod attestation; pub mod attestation_data; pub mod attestation_duty; @@ -27,6 +28,7 @@ pub mod beacon_block_body; pub mod beacon_block_header; pub mod beacon_committee; pub mod beacon_state; +pub mod builder_bid; pub mod chain_spec; pub mod checkpoint; pub mod consts; @@ -81,6 +83,7 @@ pub mod sync_committee_contribution; pub mod sync_committee_message; pub mod sync_selection_proof; pub mod sync_subnet_id; +pub mod validator_registration_data; pub mod slot_data; #[cfg(feature = "sqlite")] @@ -106,7 +109,9 @@ pub use crate::beacon_committee::{BeaconCommittee, 
OwnedBeaconCommittee}; pub use crate::beacon_state::{Error as BeaconStateError, *}; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; -pub use crate::config_and_preset::ConfigAndPreset; +pub use crate::config_and_preset::{ + ConfigAndPreset, ConfigAndPresetAltair, ConfigAndPresetBellatrix, +}; pub use crate::contribution_and_proof::ContributionAndProof; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; @@ -156,6 +161,7 @@ pub use crate::sync_duty::SyncDuty; pub use crate::sync_selection_proof::SyncSelectionProof; pub use crate::sync_subnet_id::SyncSubnetId; pub use crate::validator::{Validator, ValidatorImmutable}; +pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index cb4678e8d9..0a0432e7eb 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -9,6 +9,7 @@ use std::hash::Hash; use test_random_derive::TestRandom; use tree_hash::TreeHash; +#[derive(Debug)] pub enum BlockType { Blinded, Full, @@ -18,6 +19,7 @@ pub trait ExecPayload<T: EthSpec>: Debug + Clone + Encode + + Debug + Decode + TestRandom + TreeHash @@ -28,6 +30,8 @@ pub trait ExecPayload<T: EthSpec>: + Hash + TryFrom<ExecutionPayloadHeader<T>> + From<ExecutionPayload<T>> + + Send + + 'static { fn block_type() -> BlockType; @@ -42,6 +46,8 @@ pub trait ExecPayload<T: EthSpec>: fn block_number(&self) -> u64; fn timestamp(&self) -> u64; fn block_hash(&self) -> ExecutionBlockHash; + fn fee_recipient(&self) -> Address; + fn gas_limit(&self) -> u64; } impl<T: EthSpec> ExecPayload<T> for FullPayload<T> { @@ -72,6 +78,14 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> { fn block_hash(&self) -> ExecutionBlockHash { self.execution_payload.block_hash } + + fn fee_recipient(&self) -> Address { + 
self.execution_payload.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.execution_payload.gas_limit + } } impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> { @@ -102,6 +116,14 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> { fn block_hash(&self) -> ExecutionBlockHash { self.execution_payload_header.block_hash } + + fn fee_recipient(&self) -> Address { + self.execution_payload_header.fee_recipient + } + + fn gas_limit(&self) -> u64 { + self.execution_payload_header.gas_limit + } } #[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)] diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/proposer_slashing.rs index ff12b0611a..ca048b149a 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/proposer_slashing.rs @@ -18,6 +18,13 @@ pub struct ProposerSlashing { pub signed_header_2: SignedBeaconBlockHeader, } +impl ProposerSlashing { + /// Get proposer index, assuming slashing validity has already been checked. + pub fn proposer_index(&self) -> u64 { + self.signed_header_1.message.proposer_index + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index 5488070688..5c40c4685c 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -346,6 +346,14 @@ impl<E: EthSpec> From<SignedBeaconBlock<E>> for SignedBlindedBeaconBlock<E> { } } +// We can blind borrowed blocks with payloads by converting the payload into a header (without +// cloning the payload contents). 
+impl<E: EthSpec> SignedBeaconBlock<E> { + pub fn clone_as_blinded(&self) -> SignedBlindedBeaconBlock<E> { + SignedBeaconBlock::from_block(self.message().into(), self.signature().clone()) + } +} + #[cfg(test)] mod test { use super::*; diff --git a/consensus/types/src/test_utils/macros.rs b/consensus/types/src/test_utils/macros.rs index df449c712d..1e275a5760 100644 --- a/consensus/types/src/test_utils/macros.rs +++ b/consensus/types/src/test_utils/macros.rs @@ -13,8 +13,8 @@ macro_rules! ssz_tests { ($type: ty) => { #[test] pub fn test_ssz_round_trip() { - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use ssz::{ssz_encode, Decode}; + use $crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; let mut rng = XorShiftRng::from_seed([42; 16]); let original = <$type>::random_for_test(&mut rng); @@ -33,8 +33,8 @@ macro_rules! tree_hash_tests { ($type: ty) => { #[test] pub fn test_tree_hash_root() { - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use tree_hash::TreeHash; + use $crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; let mut rng = XorShiftRng::from_seed([42; 16]); let original = <$type>::random_for_test(&mut rng); diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 5e2a5e07af..519387bd58 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -129,6 +129,7 @@ macro_rules! 
impl_test_random_for_u8_array { }; } +impl_test_random_for_u8_array!(3); impl_test_random_for_u8_array!(4); impl_test_random_for_u8_array!(32); impl_test_random_for_u8_array!(48); diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs new file mode 100644 index 0000000000..5a3450df08 --- /dev/null +++ b/consensus/types/src/validator_registration_data.rs @@ -0,0 +1,23 @@ +use crate::*; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use tree_hash_derive::TreeHash; + +/// Validator registration, for use in interacting with servers implementing the builder API. +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SignedValidatorRegistrationData { + pub message: ValidatorRegistrationData, + pub signature: Signature, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, TreeHash)] +pub struct ValidatorRegistrationData { + pub fee_recipient: Address, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub gas_limit: u64, + #[serde(with = "eth2_serde_utils::quoted_u64")] + pub timestamp: u64, + pub pubkey: PublicKeyBytes, +} + +impl SignedRoot for ValidatorRegistrationData {} diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index 912f49c6f0..9ac468d227 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -17,12 +17,12 @@ eth2_hashing = "0.3.0" ethereum-types = "0.12.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } zeroize = { version = "1.4.2", features = ["zeroize_derive"] } -blst = "0.3.3" +blst = { version = "0.3.3", optional = true } [features] default = ["supranational"] fake_crypto = [] milagro = ["milagro_bls"] -supranational = [] +supranational = ["blst"] supranational-portable = ["supranational", "blst/portable"] supranational-force-adx = ["supranational", "blst/force-adx"] diff --git a/crypto/bls/src/impls/mod.rs b/crypto/bls/src/impls/mod.rs index 7a99798be3..b3f2da77b1 100644 --- 
a/crypto/bls/src/impls/mod.rs +++ b/crypto/bls/src/impls/mod.rs @@ -1,3 +1,4 @@ +#[cfg(feature = "supranational")] pub mod blst; pub mod fake_crypto; #[cfg(feature = "milagro")] diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index 8a31a90a14..eacbc2b268 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -41,6 +41,7 @@ pub use generic_signature::{INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN}; pub use get_withdrawal_credentials::get_withdrawal_credentials; pub use zeroize_hash::ZeroizeHash; +#[cfg(feature = "supranational")] use blst::BLST_ERROR as BlstError; #[cfg(feature = "milagro")] use milagro_bls::AmclError; @@ -53,6 +54,7 @@ pub enum Error { #[cfg(feature = "milagro")] MilagroError(AmclError), /// An error was raised from the Supranational BLST BLS library. + #[cfg(feature = "supranational")] BlstError(BlstError), /// The provided bytes were an incorrect length. InvalidByteLength { got: usize, expected: usize }, @@ -71,6 +73,7 @@ impl From<AmclError> for Error { } } +#[cfg(feature = "supranational")] impl From<BlstError> for Error { fn from(e: BlstError) -> Error { Error::BlstError(e) @@ -130,6 +133,7 @@ macro_rules! 
define_mod { #[cfg(feature = "milagro")] define_mod!(milagro_implementations, crate::impls::milagro::types); +#[cfg(feature = "supranational")] define_mod!(blst_implementations, crate::impls::blst::types); #[cfg(feature = "fake_crypto")] define_mod!( diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index 7490ab6093..eb92d252d1 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -8,9 +8,9 @@ description = "Hashing primitives used in Ethereum 2.0" [dependencies] lazy_static = { version = "1.4.0", optional = true } +cpufeatures = { version = "0.2.2", optional = true } ring = "0.16.19" sha2 = "0.10.2" -cpufeatures = "0.2.2" [dev-dependencies] rustc-hex = "2.1.0" @@ -19,5 +19,6 @@ rustc-hex = "2.1.0" wasm-bindgen-test = "0.3.18" [features] -default = ["zero_hash_cache"] +default = ["zero_hash_cache", "detect-cpufeatures"] zero_hash_cache = ["lazy_static"] +detect-cpufeatures = ["cpufeatures"] diff --git a/crypto/eth2_hashing/src/lib.rs b/crypto/eth2_hashing/src/lib.rs index c5c034640b..36a3d14139 100644 --- a/crypto/eth2_hashing/src/lib.rs +++ b/crypto/eth2_hashing/src/lib.rs @@ -127,15 +127,15 @@ pub enum DynamicImpl { // Runtime latch for detecting the availability of SHA extensions on x86_64. // // Inspired by the runtime switch within the `sha2` crate itself. 
-#[cfg(target_arch = "x86_64")] +#[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] cpufeatures::new!(x86_sha_extensions, "sha", "sse2", "ssse3", "sse4.1"); #[inline(always)] pub fn have_sha_extensions() -> bool { - #[cfg(target_arch = "x86_64")] + #[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] return x86_sha_extensions::get(); - #[cfg(not(target_arch = "x86_64"))] + #[cfg(not(all(feature = "detect-cpufeatures", target_arch = "x86_64")))] return false; } diff --git a/crypto/eth2_keystore/src/json_keystore/kdf_module.rs b/crypto/eth2_keystore/src/json_keystore/kdf_module.rs index a1295e859c..94aeab0682 100644 --- a/crypto/eth2_keystore/src/json_keystore/kdf_module.rs +++ b/crypto/eth2_keystore/src/json_keystore/kdf_module.rs @@ -58,9 +58,10 @@ impl Kdf { } /// PRF for use in `pbkdf2`. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Default)] pub enum Prf { #[serde(rename = "hmac-sha256")] + #[default] HmacSha256, } @@ -73,12 +74,6 @@ impl Prf { } } -impl Default for Prf { - fn default() -> Self { - Prf::HmacSha256 - } -} - /// Parameters for `pbkdf2` key derivation. 
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(deny_unknown_fields)] diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 6717bb0f46..50295df4b0 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -222,7 +222,7 @@ pub fn migrate_db<E: EthSpec>( runtime_context: &RuntimeContext<E>, log: Logger, ) -> Result<(), Error> { - let spec = runtime_context.eth2_config.spec.clone(); + let spec = &runtime_context.eth2_config.spec; let hot_path = client_config.get_db_path(); let cold_path = client_config.get_freezer_db_path(); @@ -236,7 +236,7 @@ pub fn migrate_db<E: EthSpec>( Ok(()) }, client_config.store.clone(), - spec, + spec.clone(), log.clone(), )?; @@ -253,6 +253,7 @@ pub fn migrate_db<E: EthSpec>( from, to, log, + spec, ) } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index d5b0dc3e96..a39abb3f78 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "2.2.1" +version = "3.1.0" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2021" @@ -37,4 +37,6 @@ web3 = { version = "0.18.0", default-features = false, features = ["http-tls", " eth1_test_rig = { path = "../testing/eth1_test_rig" } sensitive_url = { path = "../common/sensitive_url" } eth2 = { path = "../common/eth2" } +snap = "1.0.1" +beacon_chain = { path = "../beacon_node/beacon_chain" } store = { path = "../beacon_node/store" } diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 27ec8cc86c..8fd3567cdc 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,13 +1,13 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.58.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake +FROM 
rust:1.62.1-bullseye AS builder +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG PORTABLE ENV PORTABLE $PORTABLE RUN cd lighthouse && make install-lcli -FROM ubuntu:latest +FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 689107228e..1046241953 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -1,7 +1,7 @@ use clap::ArgMatches; use environment::Environment; use eth2_network_config::Eth2NetworkConfig; -use genesis::{Eth1Config, Eth1GenesisService}; +use genesis::{Eth1Config, Eth1Endpoint, Eth1GenesisService}; use sensitive_url::SensitiveUrl; use ssz::Encode; use std::cmp::max; @@ -35,11 +35,12 @@ pub fn run<T: EthSpec>( let mut config = Eth1Config::default(); if let Some(v) = endpoints.clone() { - config.endpoints = v + let endpoints = v .iter() .map(|s| SensitiveUrl::parse(s)) .collect::<Result<_, _>>() .map_err(|e| format!("Unable to parse eth1 endpoint URL: {:?}", e))?; + config.endpoints = Eth1Endpoint::NoAuth(endpoints); } config.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); config.deposit_contract_deploy_block = eth2_network_config.deposit_contract_deploy_block; diff --git a/lcli/src/indexed_attestations.rs b/lcli/src/indexed_attestations.rs new file mode 100644 index 0000000000..6e3bfa51d3 --- /dev/null +++ b/lcli/src/indexed_attestations.rs @@ -0,0 +1,48 @@ +use clap::ArgMatches; +use clap_utils::parse_required; +use state_processing::common::get_indexed_attestation; +use std::fs::File; +use std::io::Read; +use std::path::{Path, PathBuf}; +use types::*; + +fn read_file_bytes(filename: &Path) -> Result<Vec<u8>, String> { + let mut bytes = vec![]; + let mut file = File::open(filename) + .map_err(|e| format!("Unable to open {}: {}", filename.display(), e))?; + 
file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {}: {}", filename.display(), e))?; + Ok(bytes) +} + +pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> { + let spec = &T::default_spec(); + + let state_file: PathBuf = parse_required(matches, "state")?; + let attestations_file: PathBuf = parse_required(matches, "attestations")?; + + let mut state = BeaconState::<T>::from_ssz_bytes(&read_file_bytes(&state_file)?, spec) + .map_err(|e| format!("Invalid state: {:?}", e))?; + state + .build_all_committee_caches(spec) + .map_err(|e| format!("{:?}", e))?; + + let attestations: Vec<Attestation<T>> = + serde_json::from_slice(&read_file_bytes(&attestations_file)?) + .map_err(|e| format!("Invalid attestation list: {:?}", e))?; + + let indexed_attestations = attestations + .into_iter() + .map(|att| { + let committee = state.get_beacon_committee(att.data.slot, att.data.index)?; + get_indexed_attestation(committee.committee, &att) + }) + .collect::<Result<Vec<_>, _>>() + .map_err(|e| format!("Error constructing indexed attestation: {:?}", e))?; + + let string_output = serde_json::to_string_pretty(&indexed_attestations) + .map_err(|e| format!("Unable to convert to JSON: {:?}", e))?; + println!("{}", string_output); + + Ok(()) +} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 996bfc0ac7..e6a4eeeacb 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -6,6 +6,7 @@ mod create_payload_header; mod deploy_deposit_contract; mod eth1_genesis; mod generate_bootnode_enr; +mod indexed_attestations; mod insecure_validators; mod interop_genesis; mod new_testnet; @@ -21,7 +22,6 @@ use parse_ssz::run_parse_ssz; use std::path::PathBuf; use std::process; use std::str::FromStr; -use transition_blocks::run_transition_blocks; use types::{EthSpec, EthSpecId}; fn main() { @@ -56,53 +56,149 @@ fn main() { "Performs a state transition from some state across some number of skip slots", ) .arg( - Arg::with_name("pre-state") - .value_name("BEACON_STATE") + 
Arg::with_name("output-path") + .long("output-path") + .value_name("PATH") .takes_value(true) - .required(true) + .help("Path to output a SSZ file."), + ) + .arg( + Arg::with_name("pre-state-path") + .long("pre-state-path") + .value_name("PATH") + .takes_value(true) + .conflicts_with("beacon-url") .help("Path to a SSZ file of the pre-state."), ) .arg( - Arg::with_name("slots") - .value_name("SLOT_COUNT") + Arg::with_name("beacon-url") + .long("beacon-url") + .value_name("URL") .takes_value(true) - .required(true) - .help("Number of slots to skip before outputting a state.."), + .help("URL to a beacon-API provider."), ) .arg( - Arg::with_name("output") - .value_name("SSZ_FILE") + Arg::with_name("state-id") + .long("state-id") + .value_name("STATE_ID") .takes_value(true) - .required(true) - .default_value("./output.ssz") - .help("Path to output a SSZ file."), - ), + .requires("beacon-url") + .help("Identifier for a state as per beacon-API standards (slot, root, etc.)"), + ) + .arg( + Arg::with_name("runs") + .long("runs") + .value_name("INTEGER") + .takes_value(true) + .default_value("1") + .help("Number of repeat runs, useful for benchmarking."), + ) + .arg( + Arg::with_name("state-root") + .long("state-root") + .value_name("HASH256") + .takes_value(true) + .help("Tree hash root of the provided state, to avoid computing it."), + ) + .arg( + Arg::with_name("slots") + .long("slots") + .value_name("INTEGER") + .takes_value(true) + .help("Number of slots to skip forward."), + ) + .arg( + Arg::with_name("partial-state-advance") + .long("partial-state-advance") + .takes_value(false) + .help("If present, don't compute state roots when skipping forward."), + ) ) .subcommand( SubCommand::with_name("transition-blocks") .about("Performs a state transition given a pre-state and block") .arg( - Arg::with_name("pre-state") - .value_name("BEACON_STATE") + Arg::with_name("pre-state-path") + .long("pre-state-path") + .value_name("PATH") .takes_value(true) - .required(true) - 
.help("Path to a SSZ file of the pre-state."), + .conflicts_with("beacon-url") + .requires("block-path") + .help("Path to load a BeaconState from file as SSZ."), ) .arg( - Arg::with_name("block") - .value_name("BEACON_BLOCK") + Arg::with_name("block-path") + .long("block-path") + .value_name("PATH") .takes_value(true) - .required(true) - .help("Path to a SSZ file of the block to apply to pre-state."), + .conflicts_with("beacon-url") + .requires("pre-state-path") + .help("Path to load a SignedBeaconBlock from file as SSZ."), ) .arg( - Arg::with_name("output") - .value_name("SSZ_FILE") + Arg::with_name("post-state-output-path") + .long("post-state-output-path") + .value_name("PATH") .takes_value(true) - .required(true) - .default_value("./output.ssz") - .help("Path to output a SSZ file."), - ), + .help("Path to output the post-state."), + ) + .arg( + Arg::with_name("pre-state-output-path") + .long("pre-state-output-path") + .value_name("PATH") + .takes_value(true) + .help("Path to output the pre-state, useful when used with --beacon-url."), + ) + .arg( + Arg::with_name("block-output-path") + .long("block-output-path") + .value_name("PATH") + .takes_value(true) + .help("Path to output the block, useful when used with --beacon-url."), + ) + .arg( + Arg::with_name("beacon-url") + .long("beacon-url") + .value_name("URL") + .takes_value(true) + .help("URL to a beacon-API provider."), + ) + .arg( + Arg::with_name("block-id") + .long("block-id") + .value_name("BLOCK_ID") + .takes_value(true) + .requires("beacon-url") + .help("Identifier for a block as per beacon-API standards (slot, root, etc.)"), + ) + .arg( + Arg::with_name("runs") + .long("runs") + .value_name("INTEGER") + .takes_value(true) + .default_value("1") + .help("Number of repeat runs, useful for benchmarking."), + ) + .arg( + Arg::with_name("no-signature-verification") + .long("no-signature-verification") + .takes_value(false) + .help("Disable signature verification.") + ) + .arg( + 
Arg::with_name("exclude-cache-builds") + .long("exclude-cache-builds") + .takes_value(false) + .help("If present, pre-build the committee and tree-hash caches without \ + including them in the timings."), + ) + .arg( + Arg::with_name("exclude-post-block-thc") + .long("exclude-post-block-thc") + .takes_value(false) + .help("If present, don't rebuild the tree-hash-cache after applying \ + the block."), + ) ) .subcommand( SubCommand::with_name("pretty-ssz") @@ -598,6 +694,26 @@ fn main() { .help("The number of nodes to divide the validator keys to"), ) ) + .subcommand( + SubCommand::with_name("indexed-attestations") + .about("Convert attestations to indexed form, using the committees from a state.") + .arg( + Arg::with_name("state") + .long("state") + .value_name("SSZ_STATE") + .takes_value(true) + .required(true) + .help("BeaconState to generate committees from (SSZ)"), + ) + .arg( + Arg::with_name("attestations") + .long("attestations") + .value_name("JSON_ATTESTATIONS") + .takes_value(true) + .required(true) + .help("List of Attestations to convert to indexed form (JSON)"), + ) + ) .get_matches(); let result = matches @@ -631,6 +747,7 @@ fn run<T: EthSpec>( debug_level: "trace", logfile_debug_level: "trace", log_format: None, + log_color: false, max_log_size: 0, max_log_number: 0, compression: false, @@ -646,10 +763,11 @@ fn run<T: EthSpec>( )?; match matches.subcommand() { - ("transition-blocks", Some(matches)) => run_transition_blocks::<T>(testnet_dir, matches) + ("transition-blocks", Some(matches)) => transition_blocks::run::<T>(env, matches) .map_err(|e| format!("Failed to transition blocks: {}", e)), - ("skip-slots", Some(matches)) => skip_slots::run::<T>(testnet_dir, matches) - .map_err(|e| format!("Failed to skip slots: {}", e)), + ("skip-slots", Some(matches)) => { + skip_slots::run::<T>(env, matches).map_err(|e| format!("Failed to skip slots: {}", e)) + } ("pretty-ssz", Some(matches)) => { run_parse_ssz::<T>(matches).map_err(|e| format!("Failed to pretty 
print hex: {}", e)) } @@ -679,6 +797,8 @@ fn run<T: EthSpec>( .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), ("insecure-validators", Some(matches)) => insecure_validators::run(matches) .map_err(|e| format!("Failed to run insecure-validators command: {}", e)), + ("indexed-attestations", Some(matches)) => indexed_attestations::run::<T>(matches) + .map_err(|e| format!("Failed to run indexed-attestations command: {}", e)), (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), } } diff --git a/lcli/src/parse_ssz.rs b/lcli/src/parse_ssz.rs index 3f272780db..5d988ee181 100644 --- a/lcli/src/parse_ssz.rs +++ b/lcli/src/parse_ssz.rs @@ -1,7 +1,9 @@ use clap::ArgMatches; use clap_utils::parse_required; use serde::Serialize; +use snap::raw::Decoder; use ssz::Decode; +use std::fs; use std::fs::File; use std::io::Read; use std::str::FromStr; @@ -29,11 +31,18 @@ pub fn run_parse_ssz<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> { let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; let format = parse_required(matches, "format")?; - let mut bytes = vec![]; - let mut file = - File::open(filename).map_err(|e| format!("Unable to open {}: {}", filename, e))?; - file.read_to_end(&mut bytes) - .map_err(|e| format!("Unable to read {}: {}", filename, e))?; + let bytes = if filename.ends_with("ssz_snappy") { + let bytes = fs::read(filename).unwrap(); + let mut decoder = Decoder::new(); + decoder.decompress_vec(&bytes).unwrap() + } else { + let mut bytes = vec![]; + let mut file = + File::open(filename).map_err(|e| format!("Unable to open {}: {}", filename, e))?; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {}: {}", filename, e))?; + bytes + }; info!("Using {} spec", T::spec_name()); info!("Type: {:?}", type_str); diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs index cb502d37ae..28310f7683 100644 --- a/lcli/src/skip_slots.rs +++ b/lcli/src/skip_slots.rs @@ -1,58 +1,150 @@ 
+//! # Skip-Slots +//! +//! Use this tool to process a `BeaconState` through empty slots. Useful for benchmarking or +//! troubleshooting consensus failures. +//! +//! It can load states from file or pull them from a beaconAPI. States pulled from a beaconAPI can +//! be saved to disk to reduce future calls to that server. +//! +//! ## Examples +//! +//! ### Example 1. +//! +//! Download a state from a HTTP endpoint and skip forward an epoch, twice (the initial state is +//! advanced 32 slots twice, rather than it being advanced 64 slots): +//! +//! ```ignore +//! lcli skip-slots \ +//! --beacon-url http://localhost:5052 \ +//! --state-id 0x3cdc33cd02713d8d6cc33a6dbe2d3a5bf9af1d357de0d175a403496486ff845e \\ +//! --slots 32 \ +//! --runs 2 +//! ``` +//! +//! ### Example 2. +//! +//! Download a state to a SSZ file (without modifying it): +//! +//! ```ignore +//! lcli skip-slots \ +//! --beacon-url http://localhost:5052 \ +//! --state-id 0x3cdc33cd02713d8d6cc33a6dbe2d3a5bf9af1d357de0d175a403496486ff845e \ +//! --slots 0 \ +//! --runs 0 \ +//! --output-path /tmp/state-0x3cdc.ssz +//! ``` +//! +//! ### Example 3. +//! +//! Do two runs over the state that was downloaded in the previous example: +//! +//! ```ignore +//! lcli skip-slots \ +//! --pre-state-path /tmp/state-0x3cdc.ssz \ +//! --slots 32 \ +//! --runs 2 +//! 
``` use crate::transition_blocks::load_from_ssz_with; use clap::ArgMatches; -use eth2_network_config::Eth2NetworkConfig; +use clap_utils::{parse_optional, parse_required}; +use environment::Environment; +use eth2::{types::StateId, BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use ssz::Encode; -use state_processing::per_slot_processing; +use state_processing::state_advance::{complete_state_advance, partial_state_advance}; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; -use types::{BeaconState, EthSpec}; +use std::time::{Duration, Instant}; +use types::{BeaconState, CloneConfig, EthSpec, Hash256}; -pub fn run<T: EthSpec>(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), String> { - let pre_state_path = matches - .value_of("pre-state") - .ok_or("No pre-state file supplied")? - .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse pre-state path: {}", e))?; +const HTTP_TIMEOUT: Duration = Duration::from_secs(10); - let slots = matches - .value_of("slots") - .ok_or("No slots supplied")? - .parse::<usize>() - .map_err(|e| format!("Failed to parse slots: {}", e))?; +pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<(), String> { + let spec = &T::default_spec(); + let executor = env.core_context().executor; - let output_path = matches - .value_of("output") - .ok_or("No output file supplied")? 
- .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse output path: {}", e))?; + let output_path: Option<PathBuf> = parse_optional(matches, "output-path")?; + let state_path: Option<PathBuf> = parse_optional(matches, "pre-state-path")?; + let beacon_url: Option<SensitiveUrl> = parse_optional(matches, "beacon-url")?; + let runs: usize = parse_required(matches, "runs")?; + let slots: u64 = parse_required(matches, "slots")?; + let cli_state_root: Option<Hash256> = parse_optional(matches, "state-root")?; + let partial: bool = matches.is_present("partial-state-advance"); info!("Using {} spec", T::spec_name()); - info!("Pre-state path: {:?}", pre_state_path); - info!("Slots: {:?}", slots); + info!("Advancing {} slots", slots); + info!("Doing {} runs", runs); - let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; - let spec = ð2_network_config.chain_spec::<T>()?; + let (mut state, state_root) = match (state_path, beacon_url) { + (Some(state_path), None) => { + info!("State path: {:?}", state_path); + let state = load_from_ssz_with(&state_path, spec, BeaconState::from_ssz_bytes)?; + (state, None) + } + (None, Some(beacon_url)) => { + let state_id: StateId = parse_required(matches, "state-id")?; + let client = BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(HTTP_TIMEOUT)); + let state = executor + .handle() + .ok_or("shutdown in progress")? + .block_on(async move { + client + .get_debug_beacon_states::<T>(state_id) + .await + .map_err(|e| format!("Failed to download state: {:?}", e)) + }) + .map_err(|e| format!("Failed to complete task: {:?}", e))? + .ok_or_else(|| format!("Unable to locate state at {:?}", state_id))? 
+ .data; + let state_root = match state_id { + StateId::Root(root) => Some(root), + _ => None, + }; + (state, state_root) + } + _ => return Err("must supply either --state-path or --beacon-url".into()), + }; - let mut state: BeaconState<T> = - load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; + let initial_slot = state.slot(); + let target_slot = initial_slot + slots; state .build_all_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; - // Transition the parent state to the block slot. - for i in 0..slots { - per_slot_processing(&mut state, None, spec) - .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; + let state_root = if let Some(root) = cli_state_root.or(state_root) { + root + } else { + state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build THC: {:?}", e))? + }; + + for i in 0..runs { + let mut state = state.clone_with(CloneConfig::committee_caches_only()); + + let start = Instant::now(); + + if partial { + partial_state_advance(&mut state, Some(state_root), target_slot, spec) + .map_err(|e| format!("Unable to perform partial advance: {:?}", e))?; + } else { + complete_state_advance(&mut state, Some(state_root), target_slot, spec) + .map_err(|e| format!("Unable to perform complete advance: {:?}", e))?; + } + + let duration = Instant::now().duration_since(start); + info!("Run {}: {:?}", i, duration); } - let mut output_file = - File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + if let Some(output_path) = output_path { + let mut output_file = File::create(output_path) + .map_err(|e| format!("Unable to create output file: {:?}", e))?; - output_file - .write_all(&state.as_ssz_bytes()) - .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + output_file + .write_all(&state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + } Ok(()) } diff --git a/lcli/src/transition_blocks.rs 
b/lcli/src/transition_blocks.rs index 479964f994..6159723c98 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -1,63 +1,289 @@ +//! # Transition Blocks +//! +//! Use this tool to apply a `SignedBeaconBlock` to a `BeaconState`. Useful for benchmarking or +//! troubleshooting consensus failures. +//! +//! It can load states and blocks from file or pull them from a beaconAPI. Objects pulled from a +//! beaconAPI can be saved to disk to reduce future calls to that server. +//! +//! ## Examples +//! +//! ### Run using a block from a beaconAPI +//! +//! Download the 0x6c69 block and its pre-state (the state from its parent block) from the +//! beaconAPI. Advance the pre-state to the slot of the 0x6c69 block and apply that block to the +//! pre-state. +//! +//! ```ignore +//! lcli transition-blocks \ +//! --beacon-url http://localhost:5052 \ +//! --block-id 0x6c69cf50a451f1ec905e954bf1fa22970f371a72a5aa9f8e3a43a18fdd980bec \ +//! --runs 10 +//! ``` +//! +//! ### Download a block and pre-state from a beaconAPI to the filesystem +//! +//! Download a block and pre-state to the filesystem, without performing any transitions: +//! +//! ```ignore +//! lcli transition-blocks \ +//! --beacon-url http://localhost:5052 \ +//! --block-id 0x6c69cf50a451f1ec905e954bf1fa22970f371a72a5aa9f8e3a43a18fdd980bec \ +//! --runs 0 \ +//! --block-output-path /tmp/block-0x6c69.ssz \ +//! --pre-state-output-path /tmp/pre-state-0x6c69.ssz +//! ``` +//! +//! ### Use a block and pre-state from the filesystem +//! +//! Do one run over the block and pre-state downloaded in the previous example and save the post +//! state to file: +//! +//! ```ignore +//! lcli transition-blocks \ +//! --block-path /tmp/block-0x6c69.ssz \ +//! --pre-state-path /tmp/pre-state-0x6c69.ssz +//! --post-state-output-path /tmp/post-state-0x6c69.ssz +//! ``` +//! +//! ### Isolate block processing for benchmarking +//! +//! Try to isolate block processing as much as possible for benchmarking: +//! 
+//! ```ignore +//! lcli transition-blocks \ +//! --block-path /tmp/block-0x6c69.ssz \ +//! --pre-state-path /tmp/pre-state-0x6c69.ssz \ +//! --runs 10 \ +//! --exclude-cache-builds \ +//! --exclude-post-block-thc +//! ``` +use beacon_chain::{ + test_utils::EphemeralHarnessType, validator_pubkey_cache::ValidatorPubkeyCache, +}; use clap::ArgMatches; -use eth2_network_config::Eth2NetworkConfig; +use clap_utils::{parse_optional, parse_required}; +use environment::{null_logger, Environment}; +use eth2::{ + types::{BlockId, StateId}, + BeaconNodeHttpClient, SensitiveUrl, Timeouts, +}; use ssz::Encode; use state_processing::{ - per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, - VerifyBlockRoot, + block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, }; +use std::borrow::Cow; use std::fs::File; use std::io::prelude::*; use std::path::{Path, PathBuf}; -use types::{BeaconState, ChainSpec, EthSpec, SignedBeaconBlock}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use store::HotColdDB; +use types::{BeaconState, ChainSpec, CloneConfig, EthSpec, Hash256, SignedBeaconBlock}; -pub fn run_transition_blocks<T: EthSpec>( - testnet_dir: PathBuf, - matches: &ArgMatches, -) -> Result<(), String> { - let pre_state_path = matches - .value_of("pre-state") - .ok_or("No pre-state file supplied")? - .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse pre-state path: {}", e))?; +const HTTP_TIMEOUT: Duration = Duration::from_secs(10); - let block_path = matches - .value_of("block") - .ok_or("No block file supplied")? - .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse block path: {}", e))?; +#[derive(Debug)] +struct Config { + no_signature_verification: bool, + exclude_cache_builds: bool, + exclude_post_block_thc: bool, +} - let output_path = matches - .value_of("output") - .ok_or("No output file supplied")? 
- .parse::<PathBuf>() - .map_err(|e| format!("Failed to parse output path: {}", e))?; +pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result<(), String> { + let spec = &T::default_spec(); + let executor = env.core_context().executor; + + /* + * Parse (most) CLI arguments. + */ + + let pre_state_path: Option<PathBuf> = parse_optional(matches, "pre-state-path")?; + let block_path: Option<PathBuf> = parse_optional(matches, "block-path")?; + let post_state_output_path: Option<PathBuf> = + parse_optional(matches, "post-state-output-path")?; + let pre_state_output_path: Option<PathBuf> = parse_optional(matches, "pre-state-output-path")?; + let block_output_path: Option<PathBuf> = parse_optional(matches, "block-output-path")?; + let beacon_url: Option<SensitiveUrl> = parse_optional(matches, "beacon-url")?; + let runs: usize = parse_required(matches, "runs")?; + let config = Config { + no_signature_verification: matches.is_present("no-signature-verification"), + exclude_cache_builds: matches.is_present("exclude-cache-builds"), + exclude_post_block_thc: matches.is_present("exclude-post-block-thc"), + }; info!("Using {} spec", T::spec_name()); - info!("Pre-state path: {:?}", pre_state_path); - info!("Block path: {:?}", block_path); + info!("Doing {} runs", runs); + info!("{:?}", &config); - let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?; - let spec = ð2_network_config.chain_spec::<T>()?; + /* + * Load the block and pre-state from disk or beaconAPI URL. 
+ */ - let pre_state: BeaconState<T> = - load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; - let block: SignedBeaconBlock<T> = - load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?; + let (mut pre_state, mut state_root_opt, block) = match (pre_state_path, block_path, beacon_url) + { + (Some(pre_state_path), Some(block_path), None) => { + info!("Block path: {:?}", pre_state_path); + info!("Pre-state path: {:?}", block_path); + let pre_state = load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?; + let block = load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?; + (pre_state, None, block) + } + (None, None, Some(beacon_url)) => { + let block_id: BlockId = parse_required(matches, "block-id")?; + let client = BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(HTTP_TIMEOUT)); + executor + .handle() + .ok_or("shutdown in progress")? + .block_on(async move { + let block = client + .get_beacon_blocks(block_id) + .await + .map_err(|e| format!("Failed to download block: {:?}", e))? + .ok_or_else(|| format!("Unable to locate block at {:?}", block_id))? + .data; - let t = std::time::Instant::now(); - let mut post_state = do_transition(pre_state.clone(), block.clone(), spec)?; - println!("Total transition time: {}ms", t.elapsed().as_millis()); + if block.slot() == spec.genesis_slot { + return Err("Cannot run on the genesis block".to_string()); + } - if post_state.update_tree_hash_cache().unwrap() != block.state_root() { - return Err("state root mismatch".into()); + let parent_block: SignedBeaconBlock<T> = client + .get_beacon_blocks(BlockId::Root(block.parent_root())) + .await + .map_err(|e| format!("Failed to download parent block: {:?}", e))? + .ok_or_else(|| format!("Unable to locate parent block at {:?}", block_id))? 
+ .data; + + let state_root = parent_block.state_root(); + let state_id = StateId::Root(state_root); + let pre_state = client + .get_debug_beacon_states::<T>(state_id) + .await + .map_err(|e| format!("Failed to download state: {:?}", e))? + .ok_or_else(|| format!("Unable to locate state at {:?}", state_id))? + .data; + + Ok((pre_state, Some(state_root), block)) + }) + .map_err(|e| format!("Failed to complete task: {:?}", e))? + } + _ => { + return Err( + "must supply *both* --pre-state-path and --block-path *or* only --beacon-url" + .into(), + ) + } + }; + + // Compute the block root. + let block_root = block.canonical_root(); + + /* + * Create a `BeaconStore` and `ValidatorPubkeyCache` for block signature verification. + */ + + let store = HotColdDB::open_ephemeral( + <_>::default(), + spec.clone(), + null_logger().map_err(|e| format!("Failed to create null_logger: {:?}", e))?, + ) + .map_err(|e| format!("Failed to create ephemeral store: {:?}", e))?; + let store = Arc::new(store); + + debug!("Building pubkey cache (might take some time)"); + let validator_pubkey_cache = ValidatorPubkeyCache::new(&pre_state, store) + .map_err(|e| format!("Failed to create pubkey cache: {:?}", e))?; + + /* + * If cache builds are excluded from the timings, build them early so they are available for + * each run. + */ + + if config.exclude_cache_builds { + pre_state + .build_all_caches(spec) + .map_err(|e| format!("Unable to build caches: {:?}", e))?; + let state_root = pre_state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build THC: {:?}", e))?; + + if state_root_opt.map_or(false, |expected| expected != state_root) { + return Err(format!( + "State root mismatch! Expected {}, computed {}", + state_root_opt.unwrap(), + state_root + )); + } + state_root_opt = Some(state_root); } - let mut output_file = - File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + /* + * Perform the core "runs". 
+ */ - output_file - .write_all(&post_state.as_ssz_bytes()) - .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + let mut output_post_state = None; + for i in 0..runs { + let pre_state = pre_state.clone_with(CloneConfig::all()); + let block = block.clone(); + + let start = Instant::now(); + + let post_state = do_transition( + pre_state, + block_root, + block, + state_root_opt, + &config, + &validator_pubkey_cache, + spec, + )?; + + let duration = Instant::now().duration_since(start); + info!("Run {}: {:?}", i, duration); + + if output_post_state.is_none() { + output_post_state = Some(post_state) + } + } + + /* + * Write artifacts to disk, if required. + */ + + if let Some(path) = post_state_output_path { + let output_post_state = output_post_state.ok_or_else(|| { + format!( + "Post state was not computed, cannot save to disk (runs = {})", + runs + ) + })?; + + let mut output_file = + File::create(path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + + output_file + .write_all(&output_post_state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + } + + if let Some(path) = pre_state_output_path { + let mut output_file = + File::create(path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + + output_file + .write_all(&pre_state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + } + + if let Some(path) = block_output_path { + let mut output_file = + File::create(path).map_err(|e| format!("Unable to create output file: {:?}", e))?; + + output_file + .write_all(&block.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + } drop(pre_state); drop(post_state); @@ -67,60 +293,100 @@ pub fn run_transition_blocks<T: EthSpec>( fn do_transition<T: EthSpec>( mut pre_state: BeaconState<T>, + block_root: Hash256, block: SignedBeaconBlock<T>, + mut state_root_opt: Option<Hash256>, + config: &Config, + validator_pubkey_cache: 
&ValidatorPubkeyCache<EphemeralHarnessType<T>>, spec: &ChainSpec, ) -> Result<BeaconState<T>, String> { - let t = std::time::Instant::now(); - pre_state - .build_all_caches(spec) - .map_err(|e| format!("Unable to build caches: {:?}", e))?; - println!("Build all caches: {}ms", t.elapsed().as_millis()); + if !config.exclude_cache_builds { + let t = Instant::now(); + pre_state + .build_all_caches(spec) + .map_err(|e| format!("Unable to build caches: {:?}", e))?; + debug!("Build caches: {:?}", t.elapsed()); - let t = std::time::Instant::now(); - pre_state - .update_tree_hash_cache() - .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; - println!("Initial tree hash: {}ms", t.elapsed().as_millis()); + let t = Instant::now(); + let state_root = pre_state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; + debug!("Initial tree hash: {:?}", t.elapsed()); - // Transition the parent state to the block slot. - let t = std::time::Instant::now(); - for i in pre_state.slot().as_u64()..block.slot().as_u64() { - per_slot_processing(&mut pre_state, None, spec) - .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; + if state_root_opt.map_or(false, |expected| expected != state_root) { + return Err(format!( + "State root mismatch! Expected {}, computed {}", + state_root_opt.unwrap(), + state_root + )); + } + state_root_opt = Some(state_root); } println!("Slot processing: {}ms", t.elapsed().as_millis()); - let t = std::time::Instant::now(); - pre_state - .update_tree_hash_cache() - .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; - println!("Pre-block tree hash: {}ms", t.elapsed().as_millis()); + let state_root = state_root_opt.ok_or("Failed to compute state root, internal error")?; - let t = std::time::Instant::now(); + // Transition the parent state to the block slot. 
+ let t = Instant::now(); + for i in pre_state.slot().as_u64()..block.slot().as_u64() { + per_slot_processing(&mut pre_state, Some(state_root), spec) + .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; + } + debug!("Slot processing: {:?}", t.elapsed()); + + let t = Instant::now(); pre_state .build_all_caches(spec) .map_err(|e| format!("Unable to build caches: {:?}", e))?; - println!("Build all caches (again): {}ms", t.elapsed().as_millis()); + debug!("Build all caches (again): {:?}", t.elapsed()); - let t = std::time::Instant::now(); - let mut ctxt = - ConsensusContext::new(block.slot()).set_proposer_index(block.message().proposer_index()); + if !config.no_signature_verification { + let get_pubkey = move |validator_index| { + validator_pubkey_cache + .get(validator_index) + .map(Cow::Borrowed) + }; + + let decompressor = move |pk_bytes| { + // Map compressed pubkey to validator index. + let validator_index = validator_pubkey_cache.get_index(pk_bytes)?; + // Map validator index to pubkey (respecting guard on unknown validators). 
+ get_pubkey(validator_index) + }; + + let t = Instant::now(); + BlockSignatureVerifier::verify_entire_block( + &pre_state, + get_pubkey, + decompressor, + &block, + Some(block_root), + spec, + ) + .map_err(|e| format!("Invalid block signature: {:?}", e))?; + debug!("Batch verify block signatures: {:?}", t.elapsed()); + } + + let t = Instant::now(); per_block_processing( &mut pre_state, &block, + None, BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, &mut ctxt, spec, ) .map_err(|e| format!("State transition failed: {:?}", e))?; - println!("Process block: {}ms", t.elapsed().as_millis()); + debug!("Process block: {:?}", t.elapsed()); - let t = std::time::Instant::now(); - pre_state - .update_tree_hash_cache() - .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; - println!("Post-block tree hash: {}ms", t.elapsed().as_millis()); + if !config.exclude_post_block_thc { + let t = Instant::now(); + pre_state + .update_tree_hash_cache() + .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; + debug!("Post-block tree hash: {:?}", t.elapsed()); + } Ok(pre_state) } @@ -135,13 +401,8 @@ pub fn load_from_ssz_with<T>( let mut bytes = vec![]; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read from file {:?}: {:?}", path, e))?; - - let t = std::time::Instant::now(); + let t = Instant::now(); let result = decoder(&bytes, spec).map_err(|e| format!("Ssz decode failed: {:?}", e)); - println!( - "SSZ decoding {}: {}ms", - path.display(), - t.elapsed().as_millis() - ); + debug!("SSZ decoding {}: {:?}", path.display(), t.elapsed()); result } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 28d338c829..a87464dc88 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "lighthouse" -version = "2.2.1" +version = "3.1.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2021" autotests = false -rust-version = "1.58" +rust-version = "1.62" [features] -default = 
["malloc_utils/jemalloc"] +default = ["slasher-mdbx", "malloc_utils/jemalloc"] # Writes debugging .ssz files to /tmp during block processing. write_ssz_files = ["beacon_node/write_ssz_files"] # Compiles the BLS crypto code so that the binary is portable across machines. @@ -20,6 +20,10 @@ milagro = ["bls/milagro"] spec-minimal = [] # Support Gnosis spec and Gnosis Beacon Chain. gnosis = [] +# Support slasher MDBX backend. +slasher-mdbx = ["slasher/mdbx"] +# Support slasher LMDB backend. +slasher-lmdb = ["slasher/lmdb"] [dependencies] beacon_node = { "path" = "../beacon_node" } @@ -50,6 +54,7 @@ directory = { path = "../common/directory" } unused_port = { path = "../common/unused_port" } store = { path = "../beacon_node/store" } database_manager = { path = "../database_manager" } +slasher = { path = "../slasher" } [dev-dependencies] tempfile = "3.1.0" @@ -57,6 +62,7 @@ validator_dir = { path = "../common/validator_dir" } slashing_protection = { path = "../validator_client/slashing_protection" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } sensitive_url = { path = "../common/sensitive_url" } +eth1 = { path = "../beacon_node/eth1" } [[test]] name = "lighthouse_tests" diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 160f696542..679964c0de 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -47,6 +47,7 @@ pub struct LoggerConfig<'a> { pub debug_level: &'a str, pub logfile_debug_level: &'a str, pub log_format: Option<&'a str>, + pub log_color: bool, pub max_log_size: u64, pub max_log_number: usize, pub compression: bool, @@ -139,7 +140,13 @@ impl<E: EthSpec> EnvironmentBuilder<E> { _ => return Err("Logging format provided is not supported".to_string()), } } else { - let stdout_decorator = slog_term::TermDecorator::new().build(); + let stdout_decorator_builder = slog_term::TermDecorator::new(); + let stdout_decorator = if config.log_color { + 
stdout_decorator_builder.force_color() + } else { + stdout_decorator_builder + } + .build(); let stdout_decorator = logging::AlignedTermDecorator::new(stdout_decorator, logging::MAX_MESSAGE_WIDTH); let stdout_drain = slog_term::FullFormat::new(stdout_decorator).build().fuse(); diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index be87083763..7897494cc4 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -138,6 +138,13 @@ fn main() { .takes_value(true) .global(true), ) + .arg( + Arg::with_name("log-color") + .long("log-color") + .alias("log-colour") + .help("Force outputting colors when emitting logs to the terminal.") + .global(true), + ) .arg( Arg::with_name("debug-level") .long("debug-level") @@ -227,7 +234,7 @@ fn main() { Accepts a 256-bit decimal integer (not a hex value). \ This flag should only be used if the user has a clear understanding that \ the broad Ethereum community has elected to override the terminal difficulty. \ - Incorrect use of this flag will cause your node to experience a consensus + Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .takes_value(true) .global(true) @@ -239,7 +246,7 @@ fn main() { .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ This flag should only be used if the user has a clear understanding that \ the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus + Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .requires("terminal-block-hash-epoch-override") .takes_value(true) @@ -252,7 +259,7 @@ fn main() { .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ parameter. 
This flag should only be used if the user has a clear understanding \ that the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus + Incorrect use of this flag will cause your node to experience a consensus \ failure. Be extremely careful with this flag.") .requires("terminal-block-hash-override") .takes_value(true) @@ -372,6 +379,8 @@ fn run<E: EthSpec>( let log_format = matches.value_of("log-format"); + let log_color = matches.is_present("log-color"); + let logfile_debug_level = matches .value_of("logfile-debug-level") .ok_or("Expected --logfile-debug-level flag")?; @@ -424,6 +433,7 @@ fn run<E: EthSpec>( debug_level, logfile_debug_level, log_format, + log_color, max_log_size: logfile_max_size * 1_024 * 1_024, max_log_number: logfile_max_number, compression: logfile_compress, diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 06b0303c69..696830a0d1 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -494,6 +494,8 @@ fn validator_import_launchpad() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -614,6 +616,8 @@ fn validator_import_launchpad_no_password_then_add_password() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -638,6 +642,8 @@ fn validator_import_launchpad_no_password_then_add_password() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: 
SigningDefinition::LocalKeystore { voting_keystore_path: dst_keystore_dir.join(KEYSTORE_NAME), @@ -738,6 +744,8 @@ fn validator_import_launchpad_password_file() { voting_public_key: keystore.public_key().unwrap(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 5748bbd341..b28c1a0c3e 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1,6 +1,7 @@ -use beacon_node::ClientConfig as Config; +use beacon_node::{beacon_chain::CountUnrealizedFull, ClientConfig as Config}; use crate::exec::{CommandLineTestExec, CompletedTest}; +use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; use std::io::Write; @@ -10,7 +11,7 @@ use std::process::Command; use std::str::FromStr; use std::string::ToString; use tempfile::TempDir; -use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, Hash256, MainnetEthSpec}; +use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp_port, unused_udp_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -66,7 +67,10 @@ fn staking_flag() { .with_config(|config| { assert!(config.http_api.enabled); assert!(config.sync_eth1_chain); - assert_eq!(config.eth1.endpoints[0].to_string(), DEFAULT_ETH1_ENDPOINT); + assert_eq!( + config.eth1.endpoints.get_endpoints()[0].to_string(), + DEFAULT_ETH1_ENDPOINT + ); }); } @@ -128,6 +132,106 @@ fn fork_choice_before_proposal_timeout_zero() { .with_config(|config| assert_eq!(config.chain.fork_choice_before_proposal_timeout_ms, 0)); } +#[test] +fn paranoid_block_proposal_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.paranoid_block_proposal)); +} + +#[test] +fn 
paranoid_block_proposal_on() { + CommandLineTest::new() + .flag("paranoid-block-proposal", None) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.paranoid_block_proposal)); +} + +#[test] +fn count_unrealized_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_no_arg() { + CommandLineTest::new() + .flag("count-unrealized", None) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_false() { + CommandLineTest::new() + .flag("count-unrealized", Some("false")) + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_true() { + CommandLineTest::new() + .flag("count-unrealized", Some("true")) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.count_unrealized)); +} + +#[test] +fn count_unrealized_full_no_arg() { + CommandLineTest::new() + .flag("count-unrealized-full", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.count_unrealized_full, + CountUnrealizedFull::False + ) + }); +} + +#[test] +fn count_unrealized_full_false() { + CommandLineTest::new() + .flag("count-unrealized-full", Some("false")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.count_unrealized_full, + CountUnrealizedFull::False + ) + }); +} + +#[test] +fn count_unrealized_full_true() { + CommandLineTest::new() + .flag("count-unrealized-full", Some("true")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.count_unrealized_full, + CountUnrealizedFull::True + ) + }); +} + +#[test] +fn reset_payload_statuses_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(!config.chain.always_reset_payload_statuses)); +} + +#[test] +fn reset_payload_statuses_present() { + CommandLineTest::new() + 
.flag("reset-payload-statuses", None) + .run_with_zero_port() + .with_config(|config| assert!(config.chain.always_reset_payload_statuses)); +} + #[test] fn freezer_dir_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); @@ -196,18 +300,21 @@ fn eth1_endpoints_flag() { .run_with_zero_port() .with_config(|config| { assert_eq!( - config.eth1.endpoints[0].full.to_string(), + config.eth1.endpoints.get_endpoints()[0].full.to_string(), "http://localhost:9545/" ); assert_eq!( - config.eth1.endpoints[0].to_string(), + config.eth1.endpoints.get_endpoints()[0].to_string(), "http://localhost:9545/" ); assert_eq!( - config.eth1.endpoints[1].full.to_string(), + config.eth1.endpoints.get_endpoints()[1].full.to_string(), "https://infura.io/secret" ); - assert_eq!(config.eth1.endpoints[1].to_string(), "https://infura.io/"); + assert_eq!( + config.eth1.endpoints.get_endpoints()[1].to_string(), + "https://infura.io/" + ); assert!(config.sync_eth1_chain); }); } @@ -225,47 +332,128 @@ fn eth1_purge_cache_flag() { .run_with_zero_port() .with_config(|config| assert!(config.eth1.purge_cache)); } - -// Tests for Bellatrix flags. #[test] -fn merge_flag() { +fn eth1_cache_follow_distance_default() { CommandLineTest::new() - .flag("merge", None) .run_with_zero_port() - .with_config(|config| assert!(config.execution_layer.is_some())); + .with_config(|config| { + assert_eq!(config.eth1.cache_follow_distance, None); + assert_eq!(config.eth1.cache_follow_distance(), 3 * 2048 / 4); + }); } #[test] -fn merge_execution_endpoints_flag() { +fn eth1_cache_follow_distance_manual() { + CommandLineTest::new() + .flag("eth1-cache-follow-distance", Some("128")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.eth1.cache_follow_distance, Some(128)); + assert_eq!(config.eth1.cache_follow_distance(), 128); + }); +} + +// Tests for Bellatrix flags. 
+fn run_merge_execution_endpoints_flag_test(flag: &str) { use sensitive_url::SensitiveUrl; let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; - let endpoints = urls - .iter() - .map(|s| SensitiveUrl::parse(s).unwrap()) - .collect::<Vec<_>>(); + // we don't support redundancy for execution-endpoints + // only the first provided endpoint is parsed. + let mut endpoint_arg = urls[0].to_string(); - for url in urls.into_iter().skip(1) { + for url in urls.iter().skip(1) { endpoint_arg.push(','); endpoint_arg.push_str(url); } + + let (_dirs, jwts): (Vec<_>, Vec<_>) = (0..2) + .map(|i| { + let dir = TempDir::new().expect("Unable to create temporary directory"); + let path = dir.path().join(format!("jwt-{}", i)); + (dir, path) + }) + .unzip(); + + let mut jwts_arg = jwts[0].as_os_str().to_str().unwrap().to_string(); + for jwt in jwts.iter().skip(1) { + jwts_arg.push(','); + jwts_arg.push_str(jwt.as_os_str().to_str().unwrap()); + } + // this is way better but intersperse is still a nightly feature :/ // let endpoint_arg: String = urls.into_iter().intersperse(",").collect(); CommandLineTest::new() - .flag("merge", None) - .flag("execution-endpoints", Some(&endpoint_arg)) + .flag(flag, Some(&endpoint_arg)) + .flag("execution-jwt", Some(&jwts_arg)) .run_with_zero_port() .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.execution_endpoints, endpoints) + assert_eq!(config.execution_endpoints.len(), 1); + assert_eq!( + config.execution_endpoints[0], + SensitiveUrl::parse(&urls[0]).unwrap() + ); + // Only the first secret file should be used. 
+ assert_eq!(config.secret_files, vec![jwts[0].clone()]); }); } #[test] +fn merge_execution_endpoints_flag() { + run_merge_execution_endpoints_flag_test("execution-endpoints") +} +#[test] +fn merge_execution_endpoint_flag() { + run_merge_execution_endpoints_flag_test("execution-endpoint") +} +fn run_execution_endpoints_overrides_eth1_endpoints_test(eth1_flag: &str, execution_flag: &str) { + use sensitive_url::SensitiveUrl; + + let eth1_endpoint = "http://bad.bad"; + let execution_endpoint = "http://good.good"; + + assert!(eth1_endpoint != execution_endpoint); + + let dir = TempDir::new().expect("Unable to create temporary directory"); + let jwt_path = dir.path().join("jwt-file"); + + CommandLineTest::new() + .flag(eth1_flag, Some(ð1_endpoint)) + .flag(execution_flag, Some(&execution_endpoint)) + .flag("execution-jwt", jwt_path.as_os_str().to_str()) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.execution_layer.as_ref().unwrap().execution_endpoints, + vec![SensitiveUrl::parse(execution_endpoint).unwrap()] + ); + + // The eth1 endpoint should have been set to the --execution-endpoint value in defiance + // of --eth1-endpoints. 
+ assert_eq!( + config.eth1.endpoints, + Eth1Endpoint::Auth { + endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), + jwt_path: jwt_path.clone(), + jwt_id: None, + jwt_version: None, + } + ); + }); +} +#[test] +fn execution_endpoints_overrides_eth1_endpoints() { + run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoints", "execution-endpoints"); +} +#[test] +fn execution_endpoint_overrides_eth1_endpoint() { + run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoint", "execution-endpoint"); +} +#[test] fn merge_jwt_secrets_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); let mut file = File::create(dir.path().join("jwtsecrets")).expect("Unable to create file"); file.write_all(b"0x3cbc11b0d8fa16f3344eacfd6ff6430b9d30734450e8adcf5400f88d327dcb33") .expect("Unable to write to file"); CommandLineTest::new() - .flag("merge", None) .flag("execution-endpoints", Some("http://localhost:8551/")) .flag( "jwt-secrets", @@ -283,8 +471,13 @@ fn merge_jwt_secrets_flag() { } #[test] fn merge_fee_recipient_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() - .flag("merge", None) + .flag("execution-endpoint", Some("http://meow.cats")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) .flag( "suggested-fee-recipient", Some("0x00000000219ab540356cbb839cbe05303d7705fa"), @@ -298,20 +491,158 @@ fn merge_fee_recipient_flag() { ); }); } +fn run_payload_builder_flag_test(flag: &str, builders: &str) { + use sensitive_url::SensitiveUrl; + + let all_builders: Vec<_> = builders + .split(",") + .map(|builder| SensitiveUrl::parse(builder).expect("valid builder url")) + .collect(); + run_payload_builder_flag_test_with_config(flag, builders, None, None, |config| { + let config = config.execution_layer.as_ref().unwrap(); + // Only first provided endpoint is parsed as we don't support + // redundancy. 
+ assert_eq!(config.builder_url, all_builders.get(0).cloned()); + }) +} +fn run_payload_builder_flag_test_with_config<F: Fn(&Config)>( + flag: &str, + builders: &str, + additional_flag: Option<&str>, + additional_flag_value: Option<&str>, + f: F, +) { + let dir = TempDir::new().expect("Unable to create temporary directory"); + let mut test = CommandLineTest::new(); + test.flag("execution-endpoint", Some("http://meow.cats")) + .flag( + "execution-jwt", + dir.path().join("jwt-file").as_os_str().to_str(), + ) + .flag(flag, Some(builders)); + if let Some(additional_flag_name) = additional_flag { + test.flag(additional_flag_name, additional_flag_value); + } + test.run_with_zero_port().with_config(f); +} + #[test] -fn jwt_optional_flags() { +fn payload_builder_flags() { + run_payload_builder_flag_test("builder", "http://meow.cats"); + run_payload_builder_flag_test("payload-builder", "http://meow.cats"); + run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); +} + +#[test] +fn builder_fallback_flags() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-skips"), + Some("7"), + |config| { + assert_eq!(config.chain.builder_fallback_skips, 7); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-skips-per-epoch"), + Some("11"), + |config| { + assert_eq!(config.chain.builder_fallback_skips_per_epoch, 11); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-epochs-since-finalization"), + Some("4"), + |config| { + assert_eq!(config.chain.builder_fallback_epochs_since_finalization, 4); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-disable-checks"), + None, + |config| { + assert_eq!(config.chain.builder_fallback_disable_checks, true); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + 
"http://meow.cats", + Some("builder-profit-threshold"), + Some("1000000000000000000000000"), + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_profit_threshold, + 1000000000000000000000000 + ); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + None, + None, + |config| { + assert_eq!( + config + .execution_layer + .as_ref() + .unwrap() + .builder_profit_threshold, + 0 + ); + }, + ); +} + +fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { + use sensitive_url::SensitiveUrl; + + let dir = TempDir::new().expect("Unable to create temporary directory"); + let execution_endpoint = "http://meow.cats"; + let jwt_file = "jwt-file"; + let id = "bn-1"; + let version = "Lighthouse-v2.1.3"; CommandLineTest::new() - .flag("merge", None) - .flag("jwt-id", Some("bn-1")) - .flag("jwt-version", Some("Lighthouse-v2.1.3")) + .flag("execution-endpoint", Some(execution_endpoint.clone())) + .flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str()) + .flag(jwt_id_flag, Some(id)) + .flag(jwt_version_flag, Some(version)) .run_with_zero_port() .with_config(|config| { - let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.jwt_id, Some("bn-1".to_string())); - assert_eq!(config.jwt_version, Some("Lighthouse-v2.1.3".to_string())); + let el_config = config.execution_layer.as_ref().unwrap(); + assert_eq!(el_config.jwt_id, Some(id.to_string())); + assert_eq!(el_config.jwt_version, Some(version.to_string())); + assert_eq!( + config.eth1.endpoints, + Eth1Endpoint::Auth { + endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), + jwt_path: dir.path().join(jwt_file), + jwt_id: Some(id.to_string()), + jwt_version: Some(version.to_string()), + } + ); }); } #[test] +fn jwt_optional_flags() { + run_jwt_optional_flags_test("execution-jwt", "execution-jwt-id", "execution-jwt-version"); +} +#[test] +fn jwt_optional_alias_flags() { + 
run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version"); +} +#[test] fn terminal_total_difficulty_override_flag() { use beacon_node::beacon_chain::types::Uint256; CommandLineTest::new() @@ -719,6 +1050,21 @@ fn http_tls_flags() { }); } +#[test] +fn http_spec_fork_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.spec_fork_name, None)); +} + +#[test] +fn http_spec_fork_override() { + CommandLineTest::new() + .flag("http-spec-fork", Some("altair")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.http_api.spec_fork_name, Some(ForkName::Altair))); +} + // Tests for Metrics flags. #[test] fn metrics_flag() { @@ -1043,8 +1389,34 @@ fn slasher_broadcast_flag() { assert!(slasher_config.broadcast); }); } + #[test] -pub fn malloc_tuning_flag() { +fn slasher_backend_default() { + CommandLineTest::new() + .flag("slasher", None) + .run_with_zero_port() + .with_config(|config| { + let slasher_config = config.slasher.as_ref().unwrap(); + assert_eq!(slasher_config.backend, slasher::DatabaseBackend::Mdbx); + }); +} + +#[test] +fn slasher_backend_override_to_default() { + // Hard to test this flag because all but one backend is disabled by default and the backend + // called "disabled" results in a panic. 
+ CommandLineTest::new() + .flag("slasher", None) + .flag("slasher-backend", Some("mdbx")) + .run_with_zero_port() + .with_config(|config| { + let slasher_config = config.slasher.as_ref().unwrap(); + assert_eq!(slasher_config.backend, slasher::DatabaseBackend::Mdbx); + }); +} + +#[test] +fn malloc_tuning_flag() { CommandLineTest::new() .flag("disable-malloc-tuning", None) .run_with_zero_port() @@ -1067,3 +1439,16 @@ fn ensure_panic_on_failed_launch() { assert_eq!(slasher_config.chunk_size, 10); }); } + +#[test] +fn monitoring_endpoint() { + CommandLineTest::new() + .flag("monitoring-endpoint", Some("http://example:8000")) + .flag("monitoring-endpoint-period", Some("30")) + .run_with_zero_port() + .with_config(|config| { + let api_conf = config.monitoring_api.as_ref().unwrap(); + assert_eq!(api_conf.monitoring_endpoint.as_str(), "http://example:8000"); + assert_eq!(api_conf.update_period_secs, Some(30)); + }); +} diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index c14f5d27ba..a9b76c2754 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -249,66 +249,6 @@ fn fee_recipient_flag() { ) }); } -#[test] -fn fee_recipient_file_flag() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - let mut file = - File::create(dir.path().join("fee_recipient.txt")).expect("Unable to create file"); - let new_key = Keypair::random(); - let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = "default:0x00000000219ab540356cbb839cbe05303d7705fa"; - file.write_all(contents.as_bytes()) - .expect("Unable to write to file"); - CommandLineTest::new() - .flag( - "suggested-fee-recipient-file", - dir.path().join("fee_recipient.txt").as_os_str().to_str(), - ) - .run() - .with_config(|config| { - // Public key not present so load default. 
- assert_eq!( - config - .fee_recipient_file - .clone() - .unwrap() - .load_fee_recipient(&pubkeybytes) - .unwrap(), - Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap()) - ) - }); -} -#[test] -fn fee_recipient_file_with_pk_flag() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - let mut file = - File::create(dir.path().join("fee_recipient.txt")).expect("Unable to create file"); - let new_key = Keypair::random(); - let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = format!( - "{}:0x00000000219ab540356cbb839cbe05303d7705fa", - pubkeybytes.to_string() - ); - file.write_all(contents.as_bytes()) - .expect("Unable to write to file"); - CommandLineTest::new() - .flag( - "suggested-fee-recipient-file", - dir.path().join("fee_recipient.txt").as_os_str().to_str(), - ) - .run() - .with_config(|config| { - assert_eq!( - config - .fee_recipient_file - .clone() - .unwrap() - .load_fee_recipient(&pubkeybytes) - .unwrap(), - Some(Address::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap()) - ) - }); -} // Tests for HTTP flags. #[test] @@ -426,9 +366,14 @@ fn metrics_allow_origin_all_flag() { pub fn malloc_tuning_flag() { CommandLineTest::new() .flag("disable-malloc-tuning", None) - // Simply ensure that the node can start with this flag, it's very difficult to observe the - // effects of it. 
- .run(); + .run() + .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, false)); +} +#[test] +pub fn malloc_tuning_default() { + CommandLineTest::new() + .run() + .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, true)); } #[test] fn doppelganger_protection_flag() { @@ -443,3 +388,57 @@ fn no_doppelganger_protection_flag() { .run() .with_config(|config| assert!(!config.enable_doppelganger_protection)); } +#[test] +fn no_gas_limit_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(config.gas_limit.is_none())); +} +#[test] +fn gas_limit_flag() { + CommandLineTest::new() + .flag("gas-limit", Some("600")) + .flag("builder-proposals", None) + .run() + .with_config(|config| assert_eq!(config.gas_limit, Some(600))); +} +#[test] +fn no_builder_proposals_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.builder_proposals)); +} +#[test] +fn builder_proposals_flag() { + CommandLineTest::new() + .flag("builder-proposals", None) + .run() + .with_config(|config| assert!(config.builder_proposals)); +} +#[test] +fn no_builder_registration_timestamp_override_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(config.builder_registration_timestamp_override.is_none())); +} +#[test] +fn builder_registration_timestamp_override_flag() { + CommandLineTest::new() + .flag("builder-registration-timestamp-override", Some("100")) + .run() + .with_config(|config| { + assert_eq!(config.builder_registration_timestamp_override, Some(100)) + }); +} +#[test] +fn monitoring_endpoint() { + CommandLineTest::new() + .flag("monitoring-endpoint", Some("http://example:8000")) + .flag("monitoring-endpoint-period", Some("30")) + .run() + .with_config(|config| { + let api_conf = config.monitoring_api.as_ref().unwrap(); + assert_eq!(api_conf.monitoring_endpoint.as_str(), "http://example:8000"); + assert_eq!(api_conf.update_period_secs, Some(30)); + }); +} diff --git 
a/scripts/local_testnet/print_logs.sh b/scripts/local_testnet/dump_logs.sh similarity index 83% rename from scripts/local_testnet/print_logs.sh rename to scripts/local_testnet/dump_logs.sh index 2a9e7822a6..dc5f4edd38 100755 --- a/scripts/local_testnet/print_logs.sh +++ b/scripts/local_testnet/dump_logs.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Print the tail of all the logs output from local testnet +# Print all the logs output from local testnet set -Eeuo pipefail @@ -12,6 +12,6 @@ do echo "=============================================================================" echo "$f" echo "=============================================================================" - tail "$f" + cat "$f" echo "" done diff --git a/scripts/local_testnet/ganache_test_node.sh b/scripts/local_testnet/ganache_test_node.sh index 7d97f2196a..a489c33224 100755 --- a/scripts/local_testnet/ganache_test_node.sh +++ b/scripts/local_testnet/ganache_test_node.sh @@ -11,5 +11,4 @@ exec ganache \ --mnemonic "$ETH1_NETWORK_MNEMONIC" \ --port 8545 \ --blockTime $SECONDS_PER_ETH1_BLOCK \ - --networkId "$NETWORK_ID" \ - --chain.chainId "$NETWORK_ID" + --chain.chainId "$CHAIN_ID" diff --git a/scripts/local_testnet/kill_processes.sh b/scripts/local_testnet/kill_processes.sh index be6b7f3d66..d63725ac14 100755 --- a/scripts/local_testnet/kill_processes.sh +++ b/scripts/local_testnet/kill_processes.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Kill processes -set -Eeuo pipefail +set -Euo pipefail # First parameter is the file with # one pid per line. 
diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index 6f0b070915..a1348363a9 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -32,7 +32,7 @@ lcli \ --genesis-delay $GENESIS_DELAY \ --genesis-fork-version $GENESIS_FORK_VERSION \ --altair-fork-epoch $ALTAIR_FORK_EPOCH \ - --eth1-id $NETWORK_ID \ + --eth1-id $CHAIN_ID \ --eth1-follow-distance 1 \ --seconds-per-slot $SECONDS_PER_SLOT \ --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index a5c6c0b5eb..dcc0a5382a 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -5,14 +5,19 @@ set -Eeuo pipefail source ./vars.env +# Set a higher ulimit in case we want to import 1000s of validators. +ulimit -n 65536 + # VC_COUNT is defaulted in vars.env DEBUG_LEVEL=${DEBUG_LEVEL:-info} +BUILDER_PROPOSALS= # Get options -while getopts "v:d:h" flag; do +while getopts "v:d:ph" flag; do case "${flag}" in v) VC_COUNT=${OPTARG};; d) DEBUG_LEVEL=${OPTARG};; + p) BUILDER_PROPOSALS="-p";; h) validators=$(( $VALIDATOR_COUNT / $BN_COUNT )) echo "Start local testnet, defaults: 1 eth1 node, $BN_COUNT beacon nodes," @@ -23,6 +28,7 @@ while getopts "v:d:h" flag; do echo "Options:" echo " -v: VC_COUNT default: $VC_COUNT" echo " -d: DEBUG_LEVEL default: info" + echo " -p: enable private tx proposals" echo " -h: this help" exit ;; @@ -113,7 +119,7 @@ done # Start requested number of validator clients for (( vc=1; vc<=$VC_COUNT; vc++ )); do - execute_command_add_PID validator_node_$vc.log ./validator_client.sh $DATADIR/node_$vc http://localhost:$((BN_http_port_base + $vc)) $DEBUG_LEVEL + execute_command_add_PID validator_node_$vc.log ./validator_client.sh $BUILDER_PROPOSALS -d $DEBUG_LEVEL $DATADIR/node_$vc http://localhost:$((BN_http_port_base + $vc)) done echo "Started!" 
diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index 5aa75dfe2d..975a2a6753 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -10,13 +10,24 @@ set -Eeuo pipefail source ./vars.env -DEBUG_LEVEL=${3:-info} +DEBUG_LEVEL=info + +BUILDER_PROPOSALS= + +# Get options +while getopts "pd:" flag; do + case "${flag}" in + p) BUILDER_PROPOSALS="--builder-proposals";; + d) DEBUG_LEVEL=${OPTARG};; + esac +done exec lighthouse \ --debug-level $DEBUG_LEVEL \ vc \ - --datadir $1 \ + $BUILDER_PROPOSALS \ + --datadir ${@:$OPTIND:1} \ --testnet-dir $TESTNET_DIR \ --init-slashing-protection \ - --beacon-nodes $2 \ + --beacon-nodes ${@:$OPTIND+1:1} \ $VC_ARGS diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 208fbb6d85..b6ea89794f 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -18,7 +18,7 @@ GENESIS_VALIDATOR_COUNT=80 # Number of beacon_node instances that you intend to run BN_COUNT=4 -# Number of valicator clients +# Number of validator clients VC_COUNT=$BN_COUNT # Number of seconds to delay to start genesis block. @@ -30,7 +30,7 @@ GENESIS_DELAY=0 BOOTNODE_PORT=4242 # Network ID and Chain ID of local eth1 test network -NETWORK_ID=4242 +CHAIN_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=18446744073709551615 diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index 6cc0dd3b8a..376fe3d8c5 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -18,7 +18,7 @@ GENESIS_VALIDATOR_COUNT=80 # Number of beacon_node instances that you intend to run BN_COUNT=4 -# Number of valicator clients +# Number of validator clients VC_COUNT=$BN_COUNT # Number of seconds to delay to start genesis block. 
@@ -30,7 +30,7 @@ GENESIS_DELAY=0 BOOTNODE_PORT=4242 # Network ID and Chain ID of local eth1 test network -NETWORK_ID=4242 +CHAIN_ID=4242 # Hard fork configuration ALTAIR_FORK_EPOCH=18446744073709551615 diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 22b3408ab3..0f24fe9f04 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -4,6 +4,11 @@ version = "0.1.0" authors = ["Michael Sproul <michael@sigmaprime.io>"] edition = "2021" +[features] +default = ["mdbx"] +mdbx = ["dep:mdbx"] +lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] + [dependencies] bincode = "1.3.1" byteorder = "1.3.4" @@ -13,7 +18,6 @@ flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } filesystem = { path = "../common/filesystem" } -mdbx = { package = "libmdbx", version = "0.1.0" } lru = "0.7.1" parking_lot = "0.12.0" rand = "0.8.5" @@ -25,6 +29,12 @@ sloggers = { version = "2.1.1", features = ["json"] } tree_hash = "0.4.1" tree_hash_derive = "0.4.0" types = { path = "../consensus/types" } +strum = { version = "0.24.1", features = ["derive"] } + +# MDBX is pinned at the last version with Windows and macOS support. +mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", tag = "v0.1.4", optional = true } +lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } [dev-dependencies] maplit = "1.0.2" diff --git a/slasher/service/src/service.rs b/slasher/service/src/service.rs index 88feff0bbc..091a95dc4c 100644 --- a/slasher/service/src/service.rs +++ b/slasher/service/src/service.rs @@ -216,14 +216,7 @@ impl<T: BeaconChainTypes> SlasherService<T> { }; // Add to local op pool. 
- if let Err(e) = beacon_chain.import_attester_slashing(verified_slashing) { - error!( - log, - "Beacon chain refused attester slashing"; - "error" => ?e, - "slashing" => ?slashing, - ); - } + beacon_chain.import_attester_slashing(verified_slashing); // Publish to the network if broadcast is enabled. if slasher.config().broadcast { diff --git a/slasher/src/array.rs b/slasher/src/array.rs index d9f1fab819..d9cb8a4ec6 100644 --- a/slasher/src/array.rs +++ b/slasher/src/array.rs @@ -1,9 +1,11 @@ use crate::metrics::{self, SLASHER_COMPRESSION_RATIO, SLASHER_NUM_CHUNKS_UPDATED}; -use crate::RwTransaction; -use crate::{AttesterSlashingStatus, Config, Error, IndexedAttesterRecord, SlasherDB}; +use crate::{ + AttesterSlashingStatus, Config, Database, Error, IndexedAttesterRecord, RwTransaction, + SlasherDB, +}; use flate2::bufread::{ZlibDecoder, ZlibEncoder}; use serde_derive::{Deserialize, Serialize}; -use std::borrow::{Borrow, Cow}; +use std::borrow::Borrow; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::convert::TryFrom; use std::io::Read; @@ -147,10 +149,7 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn fn next_start_epoch(start_epoch: Epoch, config: &Config) -> Epoch; - fn select_db<'txn, E: EthSpec>( - db: &SlasherDB<E>, - txn: &'txn RwTransaction<'txn>, - ) -> Result<mdbx::Database<'txn>, Error>; + fn select_db<E: EthSpec>(db: &SlasherDB<E>) -> &Database; fn load<E: EthSpec>( db: &SlasherDB<E>, @@ -160,11 +159,10 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn config: &Config, ) -> Result<Option<Self>, Error> { let disk_key = config.disk_key(validator_chunk_index, chunk_index); - let chunk_bytes: Cow<[u8]> = - match txn.get(&Self::select_db(db, txn)?, &disk_key.to_be_bytes())? { - Some(chunk_bytes) => chunk_bytes, - None => return Ok(None), - }; + let chunk_bytes = match txn.get(Self::select_db(db), &disk_key.to_be_bytes())? 
{ + Some(chunk_bytes) => chunk_bytes, + None => return Ok(None), + }; let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes.borrow()))?; @@ -189,10 +187,9 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn metrics::set_float_gauge(&SLASHER_COMPRESSION_RATIO, compression_ratio); txn.put( - &Self::select_db(db, txn)?, + Self::select_db(db), &disk_key.to_be_bytes(), &compressed_value, - SlasherDB::<E>::write_flags(), )?; Ok(()) } @@ -296,11 +293,8 @@ impl TargetArrayChunk for MinTargetChunk { start_epoch / chunk_size * chunk_size - 1 } - fn select_db<'txn, E: EthSpec>( - db: &SlasherDB<E>, - txn: &'txn RwTransaction<'txn>, - ) -> Result<mdbx::Database<'txn>, Error> { - db.min_targets_db(txn) + fn select_db<E: EthSpec>(db: &SlasherDB<E>) -> &Database { + &db.databases.min_targets_db } } @@ -398,11 +392,8 @@ impl TargetArrayChunk for MaxTargetChunk { (start_epoch / chunk_size + 1) * chunk_size } - fn select_db<'txn, E: EthSpec>( - db: &SlasherDB<E>, - txn: &'txn RwTransaction<'txn>, - ) -> Result<mdbx::Database<'txn>, Error> { - db.max_targets_db(txn) + fn select_db<E: EthSpec>(db: &SlasherDB<E>) -> &Database { + &db.databases.max_targets_db } } diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 81aa4b597d..e2a58a406a 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -1,6 +1,7 @@ use crate::Error; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; +use strum::{Display, EnumString, EnumVariantNames}; use types::{Epoch, EthSpec, IndexedAttestation}; pub const DEFAULT_CHUNK_SIZE: usize = 16; @@ -12,8 +13,15 @@ pub const DEFAULT_MAX_DB_SIZE: usize = 256 * 1024; // 256 GiB pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: usize = 100_000; pub const DEFAULT_BROADCAST: bool = false; +#[cfg(feature = "mdbx")] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Mdbx; +#[cfg(all(feature = "lmdb", not(feature = "mdbx")))] +pub const DEFAULT_BACKEND: DatabaseBackend = 
DatabaseBackend::Lmdb; +#[cfg(not(any(feature = "mdbx", feature = "lmdb")))] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Disabled; + pub const MAX_HISTORY_LENGTH: usize = 1 << 16; -pub const MDBX_GROWTH_STEP: isize = 256 * (1 << 20); // 256 MiB +pub const MEGABYTE: usize = 1 << 20; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -32,6 +40,8 @@ pub struct Config { pub attestation_root_cache_size: usize, /// Whether to broadcast slashings found to the network. pub broadcast: bool, + /// Database backend to use. + pub backend: DatabaseBackend, } /// Immutable configuration parameters which are stored on disk and checked for consistency. @@ -42,6 +52,18 @@ pub struct DiskConfig { pub history_length: usize, } +#[derive( + Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Display, EnumString, EnumVariantNames, +)] +#[strum(serialize_all = "lowercase")] +pub enum DatabaseBackend { + #[cfg(feature = "mdbx")] + Mdbx, + #[cfg(feature = "lmdb")] + Lmdb, + Disabled, +} + impl Config { pub fn new(database_path: PathBuf) -> Self { Self { @@ -54,6 +76,7 @@ impl Config { max_db_size_mbs: DEFAULT_MAX_DB_SIZE, attestation_root_cache_size: DEFAULT_ATTESTATION_ROOT_CACHE_SIZE, broadcast: DEFAULT_BROADCAST, + backend: DEFAULT_BACKEND, } } diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 653eccfa72..c8046c80dc 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -1,19 +1,20 @@ -use crate::config::MDBX_GROWTH_STEP; +pub mod interface; +mod lmdb_impl; +mod mdbx_impl; + use crate::{ - metrics, utils::TxnMapFull, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, - Config, Environment, Error, ProposerSlashingStatus, RwTransaction, + metrics, AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Error, + ProposerSlashingStatus, }; use byteorder::{BigEndian, ByteOrder}; +use interface::{Environment, OpenDatabases, RwTransaction}; use lru::LruCache; -use mdbx::{Database, 
DatabaseFlags, Geometry, WriteFlags}; use parking_lot::Mutex; use serde::de::DeserializeOwned; use slog::{info, Logger}; use ssz::{Decode, Encode}; use std::borrow::{Borrow, Cow}; use std::marker::PhantomData; -use std::ops::Range; -use std::path::Path; use std::sync::Arc; use tree_hash::TreeHash; use types::{ @@ -50,10 +51,6 @@ const PROPOSERS_DB: &str = "proposers"; /// The number of DBs for MDBX to use (equal to the number of DBs defined above). const MAX_NUM_DBS: usize = 9; -/// Filename for the legacy (LMDB) database file, so that it may be deleted. -const LEGACY_DB_FILENAME: &str = "data.mdb"; -const LEGACY_DB_LOCK_FILENAME: &str = "lock.mdb"; - /// Constant key under which the schema version is stored in the `metadata_db`. const METADATA_VERSION_KEY: &[u8] = &[0]; /// Constant key under which the slasher configuration is stored in the `metadata_db`. @@ -64,11 +61,11 @@ const PROPOSER_KEY_SIZE: usize = 16; const CURRENT_EPOCH_KEY_SIZE: usize = 8; const INDEXED_ATTESTATION_ID_SIZE: usize = 6; const INDEXED_ATTESTATION_ID_KEY_SIZE: usize = 40; -const MEGABYTE: usize = 1 << 20; #[derive(Debug)] pub struct SlasherDB<E: EthSpec> { - pub(crate) env: Environment, + pub(crate) env: &'static Environment, + pub(crate) databases: OpenDatabases<'static>, /// LRU cache mapping indexed attestation IDs to their attestation data roots. attestation_root_cache: Mutex<LruCache<IndexedAttestationId, Hash256>>, pub(crate) config: Arc<Config>, @@ -249,42 +246,26 @@ fn ssz_decode<T: Decode>(bytes: Cow<[u8]>) -> Result<T, Error> { impl<E: EthSpec> SlasherDB<E> { pub fn open(config: Arc<Config>, log: Logger) -> Result<Self, Error> { - // Delete any legacy LMDB database. 
- Self::delete_legacy_file(&config.database_path, LEGACY_DB_FILENAME, &log)?; - Self::delete_legacy_file(&config.database_path, LEGACY_DB_LOCK_FILENAME, &log)?; + info!(log, "Opening slasher database"; "backend" => %config.backend); std::fs::create_dir_all(&config.database_path)?; - let env = Environment::new() - .set_max_dbs(MAX_NUM_DBS) - .set_geometry(Self::geometry(&config)) - .open_with_permissions(&config.database_path, 0o600)?; - - let txn = env.begin_rw_txn()?; - txn.create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; - txn.create_db(Some(INDEXED_ATTESTATION_ID_DB), Self::db_flags())?; - txn.create_db(Some(ATTESTERS_DB), Self::db_flags())?; - txn.create_db(Some(ATTESTERS_MAX_TARGETS_DB), Self::db_flags())?; - txn.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; - txn.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; - txn.create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?; - txn.create_db(Some(PROPOSERS_DB), Self::db_flags())?; - txn.create_db(Some(METADATA_DB), Self::db_flags())?; - txn.commit()?; + let env = Box::leak(Box::new(Environment::new(&config)?)); + let databases = env.create_databases()?; #[cfg(windows)] { - use filesystem::restrict_file_permissions; - let data = config.database_path.join("mdbx.dat"); - let lock = config.database_path.join("mdbx.lck"); - restrict_file_permissions(data).map_err(Error::DatabasePermissionsError)?; - restrict_file_permissions(lock).map_err(Error::DatabasePermissionsError)?; + for database_file in env.filenames(&config) { + filesystem::restrict_file_permissions(database_file) + .map_err(Error::DatabasePermissionsError)?; + } } let attestation_root_cache = Mutex::new(LruCache::new(config.attestation_root_cache_size)); let mut db = Self { env, + databases, attestation_root_cache, config, _phantom: PhantomData, @@ -307,102 +288,21 @@ impl<E: EthSpec> SlasherDB<E> { Ok(db) } - fn delete_legacy_file(slasher_dir: &Path, filename: &str, log: &Logger) -> Result<(), Error> { - let path = 
slasher_dir.join(filename); - - if path.is_file() { - info!( - log, - "Deleting legacy slasher DB"; - "file" => ?path.display(), - ); - std::fs::remove_file(&path)?; - } - Ok(()) - } - - fn open_db<'a>(&self, txn: &'a RwTransaction<'a>, name: &str) -> Result<Database<'a>, Error> { - Ok(txn.open_db(Some(name))?) - } - - pub fn indexed_attestation_db<'a>( - &self, - txn: &'a RwTransaction<'a>, - ) -> Result<Database<'a>, Error> { - self.open_db(txn, INDEXED_ATTESTATION_DB) - } - - pub fn indexed_attestation_id_db<'a>( - &self, - txn: &'a RwTransaction<'a>, - ) -> Result<Database<'a>, Error> { - self.open_db(txn, INDEXED_ATTESTATION_ID_DB) - } - - pub fn attesters_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, ATTESTERS_DB) - } - - pub fn attesters_max_targets_db<'a>( - &self, - txn: &'a RwTransaction<'a>, - ) -> Result<Database<'a>, Error> { - self.open_db(txn, ATTESTERS_MAX_TARGETS_DB) - } - - pub fn min_targets_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, MIN_TARGETS_DB) - } - - pub fn max_targets_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, MAX_TARGETS_DB) - } - - pub fn current_epochs_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, CURRENT_EPOCHS_DB) - } - - pub fn proposers_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, PROPOSERS_DB) - } - - pub fn metadata_db<'a>(&self, txn: &'a RwTransaction<'a>) -> Result<Database<'a>, Error> { - self.open_db(txn, METADATA_DB) - } - - pub fn db_flags() -> DatabaseFlags { - DatabaseFlags::default() - } - - pub fn write_flags() -> WriteFlags { - WriteFlags::default() - } - - pub fn begin_rw_txn(&self) -> Result<RwTransaction<'_>, Error> { - Ok(self.env.begin_rw_txn()?) 
- } - - pub fn geometry(config: &Config) -> Geometry<Range<usize>> { - Geometry { - size: Some(0..config.max_db_size_mbs * MEGABYTE), - growth_step: Some(MDBX_GROWTH_STEP), - shrink_threshold: None, - page_size: None, - } + pub fn begin_rw_txn(&self) -> Result<RwTransaction, Error> { + self.env.begin_rw_txn() } pub fn load_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<Option<u64>, Error> { - txn.get(&self.metadata_db(txn)?, METADATA_VERSION_KEY)? + txn.get(&self.databases.metadata_db, METADATA_VERSION_KEY)? .map(bincode_deserialize) .transpose() } pub fn store_schema_version(&self, txn: &mut RwTransaction<'_>) -> Result<(), Error> { txn.put( - &self.metadata_db(txn)?, + &self.databases.metadata_db, &METADATA_VERSION_KEY, &bincode::serialize(&CURRENT_SCHEMA_VERSION)?, - Self::write_flags(), )?; Ok(()) } @@ -415,17 +315,16 @@ impl<E: EthSpec> SlasherDB<E> { &self, txn: &mut RwTransaction<'_>, ) -> Result<Option<T>, Error> { - txn.get(&self.metadata_db(txn)?, METADATA_CONFIG_KEY)? + txn.get(&self.databases.metadata_db, METADATA_CONFIG_KEY)? .map(bincode_deserialize) .transpose() } pub fn store_config(&self, config: &Config, txn: &mut RwTransaction<'_>) -> Result<(), Error> { txn.put( - &self.metadata_db(txn)?, + &self.databases.metadata_db, &METADATA_CONFIG_KEY, &bincode::serialize(config)?, - Self::write_flags(), )?; Ok(()) } @@ -436,7 +335,7 @@ impl<E: EthSpec> SlasherDB<E> { txn: &mut RwTransaction<'_>, ) -> Result<Option<Epoch>, Error> { txn.get( - &self.attesters_max_targets_db(txn)?, + &self.databases.attesters_max_targets_db, CurrentEpochKey::new(validator_index).as_ref(), )? 
.map(ssz_decode) @@ -466,19 +365,17 @@ impl<E: EthSpec> SlasherDB<E> { ); for target_epoch in (start_epoch..max_target.as_u64()).map(Epoch::new) { txn.put( - &self.attesters_db(txn)?, + &self.databases.attesters_db, &AttesterKey::new(validator_index, target_epoch, &self.config), &CompactAttesterRecord::null().as_bytes(), - Self::write_flags(), )?; } } txn.put( - &self.attesters_max_targets_db(txn)?, + &self.databases.attesters_max_targets_db, &CurrentEpochKey::new(validator_index), &max_target.as_ssz_bytes(), - Self::write_flags(), )?; Ok(()) } @@ -489,7 +386,7 @@ impl<E: EthSpec> SlasherDB<E> { txn: &mut RwTransaction<'_>, ) -> Result<Option<Epoch>, Error> { txn.get( - &self.current_epochs_db(txn)?, + &self.databases.current_epochs_db, CurrentEpochKey::new(validator_index).as_ref(), )? .map(ssz_decode) @@ -503,10 +400,9 @@ impl<E: EthSpec> SlasherDB<E> { txn: &mut RwTransaction<'_>, ) -> Result<(), Error> { txn.put( - &self.current_epochs_db(txn)?, + &self.databases.current_epochs_db, &CurrentEpochKey::new(validator_index), ¤t_epoch.as_ssz_bytes(), - Self::write_flags(), )?; Ok(()) } @@ -516,7 +412,7 @@ impl<E: EthSpec> SlasherDB<E> { txn: &mut RwTransaction<'_>, key: &IndexedAttestationIdKey, ) -> Result<Option<u64>, Error> { - txn.get(&self.indexed_attestation_id_db(txn)?, key.as_ref())? + txn.get(&self.databases.indexed_attestation_id_db, key.as_ref())? .map(IndexedAttestationId::parse) .transpose() } @@ -527,12 +423,7 @@ impl<E: EthSpec> SlasherDB<E> { key: &IndexedAttestationIdKey, value: IndexedAttestationId, ) -> Result<(), Error> { - txn.put( - &self.indexed_attestation_id_db(txn)?, - key, - &value, - Self::write_flags(), - )?; + txn.put(&self.databases.indexed_attestation_id_db, key, &value)?; Ok(()) } @@ -556,18 +447,19 @@ impl<E: EthSpec> SlasherDB<E> { } // Store the new indexed attestation at the end of the current table. 
- let mut cursor = txn.cursor(&self.indexed_attestation_db(txn)?)?; + let db = &self.databases.indexed_attestation_db; + let mut cursor = txn.cursor(db)?; - let indexed_att_id = match cursor.last::<_, ()>()? { + let indexed_att_id = match cursor.last_key()? { // First ID is 1 so that 0 can be used to represent `null` in `CompactAttesterRecord`. None => 1, - Some((key_bytes, _)) => IndexedAttestationId::parse(key_bytes)? + 1, + Some(key_bytes) => IndexedAttestationId::parse(key_bytes)? + 1, }; let attestation_key = IndexedAttestationId::new(indexed_att_id); let data = indexed_attestation.as_ssz_bytes(); - cursor.put(attestation_key.as_ref(), &data, Self::write_flags())?; + cursor.put(attestation_key.as_ref(), &data)?; drop(cursor); // Update the (epoch, hash) to ID mapping. @@ -583,7 +475,7 @@ impl<E: EthSpec> SlasherDB<E> { ) -> Result<IndexedAttestation<E>, Error> { let bytes = txn .get( - &self.indexed_attestation_db(txn)?, + &self.databases.indexed_attestation_db, indexed_attestation_id.as_ref(), )? .ok_or(Error::MissingIndexedAttestation { @@ -685,10 +577,9 @@ impl<E: EthSpec> SlasherDB<E> { self.update_attester_max_target(validator_index, prev_max_target, target_epoch, txn)?; txn.put( - &self.attesters_db(txn)?, + &self.databases.attesters_db, &AttesterKey::new(validator_index, target_epoch, &self.config), &indexed_attestation_id, - Self::write_flags(), )?; Ok(AttesterSlashingStatus::NotSlashable) @@ -725,7 +616,7 @@ impl<E: EthSpec> SlasherDB<E> { let attester_key = AttesterKey::new(validator_index, target, &self.config); Ok(txn - .get(&self.attesters_db(txn)?, attester_key.as_ref())? + .get(&self.databases.attesters_db, attester_key.as_ref())? .map(CompactAttesterRecord::parse) .transpose()? 
.filter(|record| !record.is_null())) @@ -738,7 +629,7 @@ impl<E: EthSpec> SlasherDB<E> { slot: Slot, ) -> Result<Option<SignedBeaconBlockHeader>, Error> { let proposer_key = ProposerKey::new(proposer_index, slot); - txn.get(&self.proposers_db(txn)?, proposer_key.as_ref())? + txn.get(&self.databases.proposers_db, proposer_key.as_ref())? .map(ssz_decode) .transpose() } @@ -764,10 +655,9 @@ impl<E: EthSpec> SlasherDB<E> { } } else { txn.put( - &self.proposers_db(txn)?, + &self.databases.proposers_db, &ProposerKey::new(proposer_index, slot), &block_header.as_ssz_bytes(), - Self::write_flags(), )?; Ok(ProposerSlashingStatus::NotSlashable) } @@ -776,14 +666,12 @@ impl<E: EthSpec> SlasherDB<E> { /// Attempt to prune the database, deleting old blocks and attestations. pub fn prune(&self, current_epoch: Epoch) -> Result<(), Error> { let mut txn = self.begin_rw_txn()?; - self.try_prune(current_epoch, &mut txn).allow_map_full()?; + self.try_prune(current_epoch, &mut txn)?; txn.commit()?; Ok(()) } /// Try to prune the database. - /// - /// This is a separate method from `prune` so that `allow_map_full` may be used. pub fn try_prune( &self, current_epoch: Epoch, @@ -804,22 +692,22 @@ impl<E: EthSpec> SlasherDB<E> { .saturating_sub(self.config.history_length) .start_slot(E::slots_per_epoch()); - let mut cursor = txn.cursor(&self.proposers_db(txn)?)?; + let mut cursor = txn.cursor(&self.databases.proposers_db)?; // Position cursor at first key, bailing out if the database is empty. - if cursor.first::<(), ()>()?.is_none() { + if cursor.first_key()?.is_none() { return Ok(()); } loop { - let (key_bytes, ()) = cursor.get_current()?.ok_or(Error::MissingProposerKey)?; + let (key_bytes, _) = cursor.get_current()?.ok_or(Error::MissingProposerKey)?; let (slot, _) = ProposerKey::parse(key_bytes)?; if slot < min_slot { - cursor.del(Self::write_flags())?; + cursor.delete_current()?; // End the loop if there is no next entry. 
- if cursor.next::<(), ()>()?.is_none() { + if cursor.next_key()?.is_none() { break; } } else { @@ -842,10 +730,10 @@ impl<E: EthSpec> SlasherDB<E> { // Collect indexed attestation IDs to delete. let mut indexed_attestation_ids = vec![]; - let mut cursor = txn.cursor(&self.indexed_attestation_id_db(txn)?)?; + let mut cursor = txn.cursor(&self.databases.indexed_attestation_id_db)?; // Position cursor at first key, bailing out if the database is empty. - if cursor.first::<(), ()>()?.is_none() { + if cursor.first_key()?.is_none() { return Ok(()); } @@ -861,9 +749,9 @@ impl<E: EthSpec> SlasherDB<E> { IndexedAttestationId::parse(value)?, )); - cursor.del(Self::write_flags())?; + cursor.delete_current()?; - if cursor.next::<(), ()>()?.is_none() { + if cursor.next_key()?.is_none() { break; } } else { @@ -874,9 +762,9 @@ impl<E: EthSpec> SlasherDB<E> { // Delete the indexed attestations. // Optimisation potential: use a cursor here. - let indexed_attestation_db = self.indexed_attestation_db(txn)?; + let indexed_attestation_db = &self.databases.indexed_attestation_db; for indexed_attestation_id in &indexed_attestation_ids { - txn.del(&indexed_attestation_db, indexed_attestation_id, None)?; + txn.del(indexed_attestation_db, indexed_attestation_id)?; } self.delete_attestation_data_roots(indexed_attestation_ids); diff --git a/slasher/src/database/interface.rs b/slasher/src/database/interface.rs new file mode 100644 index 0000000000..5bb920383c --- /dev/null +++ b/slasher/src/database/interface.rs @@ -0,0 +1,230 @@ +use crate::{Config, DatabaseBackend, Error}; +use std::borrow::Cow; +use std::marker::PhantomData; +use std::path::PathBuf; + +#[cfg(feature = "lmdb")] +use crate::database::lmdb_impl; +#[cfg(feature = "mdbx")] +use crate::database::mdbx_impl; + +#[derive(Debug)] +pub enum Environment { + #[cfg(feature = "mdbx")] + Mdbx(mdbx_impl::Environment), + #[cfg(feature = "lmdb")] + Lmdb(lmdb_impl::Environment), + Disabled, +} + +#[derive(Debug)] +pub enum RwTransaction<'env> 
{ + #[cfg(feature = "mdbx")] + Mdbx(mdbx_impl::RwTransaction<'env>), + #[cfg(feature = "lmdb")] + Lmdb(lmdb_impl::RwTransaction<'env>), + Disabled(PhantomData<&'env ()>), +} + +#[derive(Debug)] +pub enum Database<'env> { + #[cfg(feature = "mdbx")] + Mdbx(mdbx_impl::Database<'env>), + #[cfg(feature = "lmdb")] + Lmdb(lmdb_impl::Database<'env>), + Disabled(PhantomData<&'env ()>), +} + +#[derive(Debug)] +pub struct OpenDatabases<'env> { + pub indexed_attestation_db: Database<'env>, + pub indexed_attestation_id_db: Database<'env>, + pub attesters_db: Database<'env>, + pub attesters_max_targets_db: Database<'env>, + pub min_targets_db: Database<'env>, + pub max_targets_db: Database<'env>, + pub current_epochs_db: Database<'env>, + pub proposers_db: Database<'env>, + pub metadata_db: Database<'env>, +} + +#[derive(Debug)] +pub enum Cursor<'env> { + #[cfg(feature = "mdbx")] + Mdbx(mdbx_impl::Cursor<'env>), + #[cfg(feature = "lmdb")] + Lmdb(lmdb_impl::Cursor<'env>), + Disabled(PhantomData<&'env ()>), +} + +pub type Key<'a> = Cow<'a, [u8]>; +pub type Value<'a> = Cow<'a, [u8]>; + +impl Environment { + pub fn new(config: &Config) -> Result<Environment, Error> { + match config.backend { + #[cfg(feature = "mdbx")] + DatabaseBackend::Mdbx => mdbx_impl::Environment::new(config).map(Environment::Mdbx), + #[cfg(feature = "lmdb")] + DatabaseBackend::Lmdb => lmdb_impl::Environment::new(config).map(Environment::Lmdb), + DatabaseBackend::Disabled => Err(Error::SlasherDatabaseBackendDisabled), + } + } + + pub fn create_databases(&self) -> Result<OpenDatabases, Error> { + match self { + #[cfg(feature = "mdbx")] + Self::Mdbx(env) => env.create_databases(), + #[cfg(feature = "lmdb")] + Self::Lmdb(env) => env.create_databases(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn begin_rw_txn(&self) -> Result<RwTransaction, Error> { + match self { + #[cfg(feature = "mdbx")] + Self::Mdbx(env) => env.begin_rw_txn().map(RwTransaction::Mdbx), + #[cfg(feature = "lmdb")] + 
Self::Lmdb(env) => env.begin_rw_txn().map(RwTransaction::Lmdb), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + /// List of all files used by the database. + pub fn filenames(&self, config: &Config) -> Vec<PathBuf> { + match self { + #[cfg(feature = "mdbx")] + Self::Mdbx(env) => env.filenames(config), + #[cfg(feature = "lmdb")] + Self::Lmdb(env) => env.filenames(config), + _ => vec![], + } + } +} + +impl<'env> RwTransaction<'env> { + pub fn get<K: AsRef<[u8]> + ?Sized>( + &'env self, + db: &Database<'env>, + key: &K, + ) -> Result<Option<Cow<'env, [u8]>>, Error> { + match (self, db) { + #[cfg(feature = "mdbx")] + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.get(db, key), + #[cfg(feature = "lmdb")] + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.get(db, key), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>( + &mut self, + db: &Database, + key: K, + value: V, + ) -> Result<(), Error> { + match (self, db) { + #[cfg(feature = "mdbx")] + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.put(db, key, value), + #[cfg(feature = "lmdb")] + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.put(db, key, value), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn del<K: AsRef<[u8]>>(&mut self, db: &Database, key: K) -> Result<(), Error> { + match (self, db) { + #[cfg(feature = "mdbx")] + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.del(db, key), + #[cfg(feature = "lmdb")] + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.del(db, key), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn cursor<'a>(&'a mut self, db: &Database) -> Result<Cursor<'a>, Error> { + match (self, db) { + #[cfg(feature = "mdbx")] + (Self::Mdbx(txn), Database::Mdbx(db)) => txn.cursor(db).map(Cursor::Mdbx), + #[cfg(feature = "lmdb")] + (Self::Lmdb(txn), Database::Lmdb(db)) => txn.cursor(db).map(Cursor::Lmdb), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn commit(self) -> Result<(), Error> { + match self { + 
#[cfg(feature = "mdbx")] + Self::Mdbx(txn) => txn.commit(), + #[cfg(feature = "lmdb")] + Self::Lmdb(txn) => txn.commit(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } +} + +impl<'env> Cursor<'env> { + /// Return the first key in the current database while advancing the cursor's position. + pub fn first_key(&mut self) -> Result<Option<Key>, Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.first_key(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.first_key(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + /// Return the last key in the current database while advancing the cursor's position. + pub fn last_key(&mut self) -> Result<Option<Key>, Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.last_key(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.last_key(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn next_key(&mut self) -> Result<Option<Key>, Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.next_key(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.next_key(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + /// Get the key value pair at the current position. 
+ pub fn get_current(&mut self) -> Result<Option<(Key, Value)>, Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.get_current(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.get_current(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn delete_current(&mut self) -> Result<(), Error> { + match self { + #[cfg(feature = "mdbx")] + Cursor::Mdbx(cursor) => cursor.delete_current(), + #[cfg(feature = "lmdb")] + Cursor::Lmdb(cursor) => cursor.delete_current(), + _ => Err(Error::MismatchedDatabaseVariant), + } + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> { + match self { + #[cfg(feature = "mdbx")] + Self::Mdbx(cursor) => cursor.put(key, value), + #[cfg(feature = "lmdb")] + Self::Lmdb(cursor) => cursor.put(key, value), + _ => Err(Error::MismatchedDatabaseVariant), + } + } +} diff --git a/slasher/src/database/lmdb_impl.rs b/slasher/src/database/lmdb_impl.rs new file mode 100644 index 0000000000..98839fcc46 --- /dev/null +++ b/slasher/src/database/lmdb_impl.rs @@ -0,0 +1,203 @@ +#![cfg(feature = "lmdb")] + +use crate::{ + config::MEGABYTE, + database::{ + interface::{Key, OpenDatabases, Value}, + *, + }, + Config, Error, +}; +use lmdb::{Cursor as _, DatabaseFlags, Transaction, WriteFlags}; +use lmdb_sys::{MDB_FIRST, MDB_GET_CURRENT, MDB_LAST, MDB_NEXT}; +use std::borrow::Cow; +use std::marker::PhantomData; +use std::path::PathBuf; + +#[derive(Debug)] +pub struct Environment { + env: lmdb::Environment, +} + +#[derive(Debug)] +pub struct RwTransaction<'env> { + txn: lmdb::RwTransaction<'env>, +} + +#[derive(Debug)] +pub struct Database<'env> { + db: lmdb::Database, + _phantom: PhantomData<&'env ()>, +} + +#[derive(Debug)] +pub struct Cursor<'env> { + cursor: lmdb::RwCursor<'env>, +} + +impl Environment { + pub fn new(config: &Config) -> Result<Environment, Error> { + let env = lmdb::Environment::new() + .set_max_dbs(MAX_NUM_DBS as u32) + 
.set_map_size(config.max_db_size_mbs * MEGABYTE) + .open_with_permissions(&config.database_path, 0o600)?; + Ok(Environment { env }) + } + + pub fn create_databases(&self) -> Result<OpenDatabases, Error> { + let indexed_attestation_db = self + .env + .create_db(Some(INDEXED_ATTESTATION_DB), Self::db_flags())?; + let indexed_attestation_id_db = self + .env + .create_db(Some(INDEXED_ATTESTATION_ID_DB), Self::db_flags())?; + let attesters_db = self.env.create_db(Some(ATTESTERS_DB), Self::db_flags())?; + let attesters_max_targets_db = self + .env + .create_db(Some(ATTESTERS_MAX_TARGETS_DB), Self::db_flags())?; + let min_targets_db = self.env.create_db(Some(MIN_TARGETS_DB), Self::db_flags())?; + let max_targets_db = self.env.create_db(Some(MAX_TARGETS_DB), Self::db_flags())?; + let current_epochs_db = self + .env + .create_db(Some(CURRENT_EPOCHS_DB), Self::db_flags())?; + let proposers_db = self.env.create_db(Some(PROPOSERS_DB), Self::db_flags())?; + let metadata_db = self.env.create_db(Some(METADATA_DB), Self::db_flags())?; + + let wrap = |db| { + crate::Database::Lmdb(Database { + db, + _phantom: PhantomData, + }) + }; + + Ok(OpenDatabases { + indexed_attestation_db: wrap(indexed_attestation_db), + indexed_attestation_id_db: wrap(indexed_attestation_id_db), + attesters_db: wrap(attesters_db), + attesters_max_targets_db: wrap(attesters_max_targets_db), + min_targets_db: wrap(min_targets_db), + max_targets_db: wrap(max_targets_db), + current_epochs_db: wrap(current_epochs_db), + proposers_db: wrap(proposers_db), + metadata_db: wrap(metadata_db), + }) + } + + pub fn begin_rw_txn(&self) -> Result<RwTransaction, Error> { + let txn = self.env.begin_rw_txn()?; + Ok(RwTransaction { txn }) + } + + pub fn filenames(&self, config: &Config) -> Vec<PathBuf> { + vec![ + config.database_path.join("data.mdb"), + config.database_path.join("lock.mdb"), + ] + } + + fn db_flags() -> DatabaseFlags { + DatabaseFlags::default() + } +} + +impl<'env> RwTransaction<'env> { + pub fn get<K: 
AsRef<[u8]> + ?Sized>( + &'env self, + db: &Database<'env>, + key: &K, + ) -> Result<Option<Cow<'env, [u8]>>, Error> { + Ok(self.txn.get(db.db, key).optional()?.map(Cow::Borrowed)) + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>( + &mut self, + db: &Database, + key: K, + value: V, + ) -> Result<(), Error> { + self.txn.put(db.db, &key, &value, Self::write_flags())?; + Ok(()) + } + + pub fn del<K: AsRef<[u8]>>(&mut self, db: &Database, key: K) -> Result<(), Error> { + self.txn.del(db.db, &key, None)?; + Ok(()) + } + + pub fn cursor<'a>(&'a mut self, db: &Database) -> Result<Cursor<'a>, Error> { + let cursor = self.txn.open_rw_cursor(db.db)?; + Ok(Cursor { cursor }) + } + + pub fn commit(self) -> Result<(), Error> { + self.txn.commit()?; + Ok(()) + } + + fn write_flags() -> WriteFlags { + WriteFlags::default() + } +} + +impl<'env> Cursor<'env> { + pub fn first_key(&mut self) -> Result<Option<Key>, Error> { + let opt_key = self + .cursor + .get(None, None, MDB_FIRST) + .optional()? + .and_then(|(key, _)| Some(Cow::Borrowed(key?))); + Ok(opt_key) + } + + pub fn last_key(&mut self) -> Result<Option<Key<'env>>, Error> { + let opt_key = self + .cursor + .get(None, None, MDB_LAST) + .optional()? + .and_then(|(key, _)| Some(Cow::Borrowed(key?))); + Ok(opt_key) + } + + pub fn next_key(&mut self) -> Result<Option<Key<'env>>, Error> { + let opt_key = self + .cursor + .get(None, None, MDB_NEXT) + .optional()? + .and_then(|(key, _)| Some(Cow::Borrowed(key?))); + Ok(opt_key) + } + + pub fn get_current(&mut self) -> Result<Option<(Key<'env>, Value<'env>)>, Error> { + if let Some((Some(key), value)) = self.cursor.get(None, None, MDB_GET_CURRENT).optional()? 
{ + Ok(Some((Cow::Borrowed(key), Cow::Borrowed(value)))) + } else { + Ok(None) + } + } + + pub fn delete_current(&mut self) -> Result<(), Error> { + self.cursor.del(RwTransaction::write_flags())?; + Ok(()) + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> { + self.cursor + .put(&key, &value, RwTransaction::write_flags())?; + Ok(()) + } +} + +/// Mix-in trait for loading values from LMDB that may or may not exist. +pub trait TxnOptional<T, E> { + fn optional(self) -> Result<Option<T>, E>; +} + +impl<T> TxnOptional<T, Error> for Result<T, lmdb::Error> { + fn optional(self) -> Result<Option<T>, Error> { + match self { + Ok(x) => Ok(Some(x)), + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(e.into()), + } + } +} diff --git a/slasher/src/database/mdbx_impl.rs b/slasher/src/database/mdbx_impl.rs new file mode 100644 index 0000000000..d25f17e7ac --- /dev/null +++ b/slasher/src/database/mdbx_impl.rs @@ -0,0 +1,186 @@ +#![cfg(feature = "mdbx")] + +use crate::{ + config::MEGABYTE, + database::{ + interface::{Key, OpenDatabases, Value}, + *, + }, + Config, Error, +}; +use mdbx::{DatabaseFlags, Geometry, WriteFlags}; +use std::borrow::Cow; +use std::ops::Range; +use std::path::PathBuf; + +pub const MDBX_GROWTH_STEP: isize = 256 * (1 << 20); // 256 MiB + +#[derive(Debug)] +pub struct Environment { + env: mdbx::Environment<mdbx::NoWriteMap>, +} + +#[derive(Debug)] +pub struct RwTransaction<'env> { + txn: mdbx::Transaction<'env, mdbx::RW, mdbx::NoWriteMap>, +} + +#[derive(Debug)] +pub struct Database<'env> { + db: mdbx::Database<'env>, +} + +#[derive(Debug)] +pub struct Cursor<'env> { + cursor: mdbx::Cursor<'env, mdbx::RW>, +} + +impl Environment { + pub fn new(config: &Config) -> Result<Environment, Error> { + let env = mdbx::Environment::new() + .set_max_dbs(MAX_NUM_DBS) + .set_geometry(Self::geometry(config)) + .open_with_permissions(&config.database_path, 0o600)?; + Ok(Environment { env }) + } + + pub fn 
create_databases(&self) -> Result<OpenDatabases, Error> { + let txn = self.begin_rw_txn()?; + txn.create_db(INDEXED_ATTESTATION_DB)?; + txn.create_db(INDEXED_ATTESTATION_ID_DB)?; + txn.create_db(ATTESTERS_DB)?; + txn.create_db(ATTESTERS_MAX_TARGETS_DB)?; + txn.create_db(MIN_TARGETS_DB)?; + txn.create_db(MAX_TARGETS_DB)?; + txn.create_db(CURRENT_EPOCHS_DB)?; + txn.create_db(PROPOSERS_DB)?; + txn.create_db(METADATA_DB)?; + + // This is all rather nasty + let (_, mut databases) = txn.txn.commit_and_rebind_open_dbs()?; + let mut next_db = || { + crate::Database::Mdbx(Database { + db: databases.remove(0), + }) + }; + + Ok(OpenDatabases { + indexed_attestation_db: next_db(), + indexed_attestation_id_db: next_db(), + attesters_db: next_db(), + attesters_max_targets_db: next_db(), + min_targets_db: next_db(), + max_targets_db: next_db(), + current_epochs_db: next_db(), + proposers_db: next_db(), + metadata_db: next_db(), + }) + } + + pub fn begin_rw_txn(&self) -> Result<RwTransaction, Error> { + let txn = self.env.begin_rw_txn()?; + Ok(RwTransaction { txn }) + } + + pub fn filenames(&self, config: &Config) -> Vec<PathBuf> { + vec![ + config.database_path.join("mdbx.dat"), + config.database_path.join("mdbx.lck"), + ] + } + + fn geometry(config: &Config) -> Geometry<Range<usize>> { + Geometry { + size: Some(0..config.max_db_size_mbs * MEGABYTE), + growth_step: Some(MDBX_GROWTH_STEP), + shrink_threshold: None, + page_size: None, + } + } +} + +impl<'env> RwTransaction<'env> { + pub fn create_db(&self, name: &'static str) -> Result<(), Error> { + let db = self.txn.create_db(Some(name), Self::db_flags())?; + self.txn.prime_for_permaopen(db); + Ok(()) + } + + pub fn open_db(&self, name: &'static str) -> Result<Database, Error> { + let db = self.txn.open_db(Some(name))?; + Ok(Database { db }) + } + + pub fn get<K: AsRef<[u8]> + ?Sized>( + &'env self, + db: &Database<'env>, + key: &K, + ) -> Result<Option<Cow<'env, [u8]>>, Error> { + Ok(self.txn.get(&db.db, key.as_ref())?) 
+ } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>( + &self, + db: &Database, + key: K, + value: V, + ) -> Result<(), Error> { + self.txn.put(&db.db, key, value, Self::write_flags())?; + Ok(()) + } + + pub fn del<K: AsRef<[u8]>>(&self, db: &Database, key: K) -> Result<(), Error> { + self.txn.del(&db.db, key, None)?; + Ok(()) + } + + pub fn cursor<'a>(&'a self, db: &Database) -> Result<Cursor<'a>, Error> { + let cursor = self.txn.cursor(&db.db)?; + Ok(Cursor { cursor }) + } + + pub fn commit(self) -> Result<(), Error> { + self.txn.commit()?; + Ok(()) + } + + fn db_flags() -> DatabaseFlags { + DatabaseFlags::default() + } + + fn write_flags() -> WriteFlags { + WriteFlags::default() + } +} + +impl<'env> Cursor<'env> { + pub fn first_key(&mut self) -> Result<Option<Cow<'env, [u8]>>, Error> { + let opt_key = self.cursor.first()?.map(|(key_bytes, ())| key_bytes); + Ok(opt_key) + } + + pub fn last_key(&mut self) -> Result<Option<Cow<'env, [u8]>>, Error> { + let opt_key = self.cursor.last()?.map(|(key_bytes, ())| key_bytes); + Ok(opt_key) + } + + pub fn next_key(&mut self) -> Result<Option<Cow<'env, [u8]>>, Error> { + let opt_key = self.cursor.next()?.map(|(key_bytes, ())| key_bytes); + Ok(opt_key) + } + + pub fn get_current(&mut self) -> Result<Option<(Key<'env>, Value<'env>)>, Error> { + Ok(self.cursor.get_current()?) 
+ } + + pub fn delete_current(&mut self) -> Result<(), Error> { + self.cursor.del(RwTransaction::write_flags())?; + Ok(()) + } + + pub fn put<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) -> Result<(), Error> { + self.cursor + .put(key.as_ref(), value.as_ref(), RwTransaction::write_flags())?; + Ok(()) + } +} diff --git a/slasher/src/error.rs b/slasher/src/error.rs index 7e689022e4..b939c281e9 100644 --- a/slasher/src/error.rs +++ b/slasher/src/error.rs @@ -4,7 +4,12 @@ use types::Epoch; #[derive(Debug)] pub enum Error { - DatabaseError(mdbx::Error), + #[cfg(feature = "mdbx")] + DatabaseMdbxError(mdbx::Error), + #[cfg(feature = "lmdb")] + DatabaseLmdbError(lmdb::Error), + SlasherDatabaseBackendDisabled, + MismatchedDatabaseVariant, DatabaseIOError(io::Error), DatabasePermissionsError(filesystem::Error), SszDecodeError(ssz::DecodeError), @@ -63,11 +68,22 @@ pub enum Error { InconsistentAttestationDataRoot, } +#[cfg(feature = "mdbx")] impl From<mdbx::Error> for Error { fn from(e: mdbx::Error) -> Self { match e { mdbx::Error::Other(os_error) => Error::from(io::Error::from_raw_os_error(os_error)), - _ => Error::DatabaseError(e), + _ => Error::DatabaseMdbxError(e), + } + } +} + +#[cfg(feature = "lmdb")] +impl From<lmdb::Error> for Error { + fn from(e: lmdb::Error) -> Self { + match e { + lmdb::Error::Other(os_error) => Error::from(io::Error::from_raw_os_error(os_error)), + _ => Error::DatabaseLmdbError(e), } } } diff --git a/slasher/src/lib.rs b/slasher/src/lib.rs index 184e3080e5..132ce8b235 100644 --- a/slasher/src/lib.rs +++ b/slasher/src/lib.rs @@ -1,4 +1,8 @@ #![deny(missing_debug_implementations)] +#![cfg_attr( + not(any(feature = "mdbx", feature = "lmdb")), + allow(unused, clippy::drop_non_drop) +)] mod array; mod attestation_queue; @@ -12,22 +16,20 @@ pub mod metrics; mod migrate; mod slasher; pub mod test_utils; -mod utils; pub use crate::slasher::Slasher; pub use attestation_queue::{AttestationBatch, AttestationQueue, SimpleBatch}; pub use 
attester_record::{AttesterRecord, CompactAttesterRecord, IndexedAttesterRecord}; pub use block_queue::BlockQueue; -pub use config::Config; -pub use database::{IndexedAttestationId, SlasherDB}; +pub use config::{Config, DatabaseBackend}; +pub use database::{ + interface::{Database, Environment, RwTransaction}, + IndexedAttestationId, SlasherDB, +}; pub use error::Error; use types::{AttesterSlashing, EthSpec, IndexedAttestation, ProposerSlashing}; -/// LMDB-to-MDBX compatibility shims. -pub type Environment = mdbx::Environment<mdbx::NoWriteMap>; -pub type RwTransaction<'env> = mdbx::Transaction<'env, mdbx::RW, mdbx::NoWriteMap>; - #[derive(Debug, PartialEq)] pub enum AttesterSlashingStatus<E: EthSpec> { NotSlashable, diff --git a/slasher/src/utils.rs b/slasher/src/utils.rs deleted file mode 100644 index ccd31e74e2..0000000000 --- a/slasher/src/utils.rs +++ /dev/null @@ -1,16 +0,0 @@ -use crate::Error; - -/// Transform a transaction that would fail with a `MapFull` error into an optional result. 
-pub trait TxnMapFull<T, E> { - fn allow_map_full(self) -> Result<Option<T>, E>; -} - -impl<T> TxnMapFull<T, Error> for Result<T, Error> { - fn allow_map_full(self) -> Result<Option<T>, Error> { - match self { - Ok(x) => Ok(Some(x)), - Err(Error::DatabaseError(mdbx::Error::MapFull)) => Ok(None), - Err(e) => Err(e), - } - } -} diff --git a/slasher/tests/attester_slashings.rs b/slasher/tests/attester_slashings.rs index a2abbc55b1..5cf3fe6c2a 100644 --- a/slasher/tests/attester_slashings.rs +++ b/slasher/tests/attester_slashings.rs @@ -1,3 +1,5 @@ +#![cfg(any(feature = "mdbx", feature = "lmdb"))] + use logging::test_logger; use maplit::hashset; use rayon::prelude::*; diff --git a/slasher/tests/proposer_slashings.rs b/slasher/tests/proposer_slashings.rs index e8b052e664..3b7b8ed583 100644 --- a/slasher/tests/proposer_slashings.rs +++ b/slasher/tests/proposer_slashings.rs @@ -1,3 +1,5 @@ +#![cfg(any(feature = "mdbx", feature = "lmdb"))] + use logging::test_logger; use slasher::{ test_utils::{block as test_block, E}, diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 8126602f37..968a4dbb68 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -1,3 +1,5 @@ +#![cfg(any(feature = "mdbx", feature = "lmdb"))] + use logging::test_logger; use rand::prelude::*; use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; diff --git a/slasher/tests/wrap_around.rs b/slasher/tests/wrap_around.rs index b256840ee5..d2c876d363 100644 --- a/slasher/tests/wrap_around.rs +++ b/slasher/tests/wrap_around.rs @@ -1,3 +1,5 @@ +#![cfg(any(feature = "mdbx", feature = "lmdb"))] + use logging::test_logger; use slasher::{test_utils::indexed_att, Config, Slasher}; use tempfile::tempdir; diff --git a/testing/antithesis/Dockerfile.libvoidstar b/testing/antithesis/Dockerfile.libvoidstar index 61b95397d7..32e2d5648d 100644 --- a/testing/antithesis/Dockerfile.libvoidstar +++ b/testing/antithesis/Dockerfile.libvoidstar @@ -1,9 +1,10 @@ -FROM rust:1.58.1-bullseye AS builder 
+FROM rust:1.62.1-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse -# build lighthouse directly with a cargo build command, bypassing the makefile -RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse +# Build lighthouse directly with a cargo build command, bypassing the Makefile. +# We have to use nightly in order to disable the new LLVM pass manager. +RUN rustup default nightly-2022-07-26 && cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Znew-llvm-pass-manager=no -Cpasses=sancov -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse # build lcli binary directly with cargo install command, bypassing the makefile RUN cargo install --path /lighthouse/lcli --force --locked diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 13d8f631cc..dc89cb5d5f 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.1.10 +TESTS_TAG := v1.2.0-rc.3 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 2eb4ce5407..a10ccf1e6f 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -25,16 
+25,28 @@ excluded_paths = [ # Intentionally omitted, as per https://github.com/sigp/lighthouse/issues/1835 "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", + # light_client + "tests/.*/.*/light_client", # LightClientStore "tests/.*/.*/ssz_static/LightClientStore", # LightClientUpdate "tests/.*/.*/ssz_static/LightClientUpdate", # LightClientSnapshot "tests/.*/.*/ssz_static/LightClientSnapshot", + # LightClientBootstrap + "tests/.*/.*/ssz_static/LightClientBootstrap", + # LightClientOptimistic + "tests/.*/.*/ssz_static/LightClientOptimistic", + # LightClientFinalityUpdate + "tests/.*/.*/ssz_static/LightClientFinalityUpdate", # Merkle-proof tests for light clients "tests/.*/.*/merkle/single_proof", + # Capella tests are disabled for now. + "tests/.*/capella", # One of the EF researchers likes to pack the tarballs on a Mac - ".*\.DS_Store.*" + ".*\.DS_Store.*", + # More Mac weirdness. + "tests/mainnet/bellatrix/operations/deposit/pyspec_tests/deposit_with_previous_fork_version__valid_ineffective/._meta.yaml" ] def normalize_path(path): diff --git a/testing/ef_tests/src/bls_setting.rs b/testing/ef_tests/src/bls_setting.rs index add7d8b7bd..24aaf60080 100644 --- a/testing/ef_tests/src/bls_setting.rs +++ b/testing/ef_tests/src/bls_setting.rs @@ -2,20 +2,15 @@ use self::BlsSetting::*; use crate::error::Error; use serde_repr::Deserialize_repr; -#[derive(Deserialize_repr, Debug, Clone, Copy)] +#[derive(Deserialize_repr, Debug, Clone, Copy, Default)] #[repr(u8)] pub enum BlsSetting { + #[default] Flexible = 0, Required = 1, Ignored = 2, } -impl Default for BlsSetting { - fn default() -> Self { - Flexible - } -} - impl BlsSetting { /// Check the BLS setting and skip the test if it isn't compatible with the crypto config. 
pub fn check(self) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index ac9ca8993c..64f4aa7538 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -81,11 +81,23 @@ pub struct Cases<T> { } impl<T: Case> Cases<T> { - pub fn test_results(&self, fork_name: ForkName) -> Vec<CaseResult> { - self.test_cases - .into_par_iter() - .enumerate() - .map(|(i, (ref path, ref tc))| CaseResult::new(i, path, tc, tc.result(i, fork_name))) - .collect() + pub fn test_results(&self, fork_name: ForkName, use_rayon: bool) -> Vec<CaseResult> { + if use_rayon { + self.test_cases + .into_par_iter() + .enumerate() + .map(|(i, (ref path, ref tc))| { + CaseResult::new(i, path, tc, tc.result(i, fork_name)) + }) + .collect() + } else { + self.test_cases + .iter() + .enumerate() + .map(|(i, (ref path, ref tc))| { + CaseResult::new(i, path, tc, tc.result(i, fork_name)) + }) + .collect() + } } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index ccc6b5e6f9..9fbc6b7dc6 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -88,17 +88,23 @@ impl<E: EthSpec> EpochTransition<E> for JustificationAndFinalization { BeaconState::Base(_) => { let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; validator_statuses.process_attestations(state)?; - base::process_justification_and_finalization( - state, - &validator_statuses.total_balances, - spec, - ) + let justification_and_finalization_state = + base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) } BeaconState::Altair(_) | BeaconState::Merge(_) => { - altair::process_justification_and_finalization( - state, - &altair::ParticipationCache::new(state, spec).unwrap(), - ) + let 
justification_and_finalization_state = + altair::process_justification_and_finalization( + state, + &altair::ParticipationCache::new(state, spec).unwrap(), + )?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) } } } @@ -271,7 +277,8 @@ impl<E: EthSpec, T: EpochTransition<E>> Case for EpochProcessing<E, T> { && T::name() != "inactivity_updates" && T::name() != "participation_flag_updates" } - ForkName::Altair | ForkName::Merge => true, + // No phase0 tests for Altair and later. + ForkName::Altair | ForkName::Merge => T::name() != "participation_record_updates", } } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 92c28aeb04..650452d783 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -7,15 +7,17 @@ use beacon_chain::{ obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, }, test_utils::{BeaconChainHarness, EphemeralHarnessType}, - BeaconChainTypes, HeadInfo, + BeaconChainTypes, CachedHead, CountUnrealized, }; use serde_derive::Deserialize; use ssz_derive::Decode; use state_processing::state_advance::complete_state_advance; +use std::future::Future; +use std::sync::Arc; use std::time::Duration; use types::{ - Attestation, BeaconBlock, BeaconState, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, - ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec, + ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -43,16 +45,19 @@ pub struct Checks { justified_checkpoint_root: Option<Hash256>, finalized_checkpoint: Option<Checkpoint>, best_justified_checkpoint: Option<Checkpoint>, + u_justified_checkpoint: Option<Checkpoint>, + u_finalized_checkpoint: Option<Checkpoint>, proposer_boost_root: Option<Hash256>, } 
#[derive(Debug, Clone, Deserialize)] #[serde(untagged, deny_unknown_fields)] -pub enum Step<B, A, P> { +pub enum Step<B, A, AS, P> { Tick { tick: u64 }, ValidBlock { block: B }, MaybeValidBlock { block: B, valid: bool }, Attestation { attestation: A }, + AttesterSlashing { attester_slashing: AS }, PowBlock { pow_block: P }, Checks { checks: Box<Checks> }, } @@ -69,16 +74,8 @@ pub struct ForkChoiceTest<E: EthSpec> { pub description: String, pub anchor_state: BeaconState<E>, pub anchor_block: BeaconBlock<E>, - pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, PowBlock>>, -} - -/// Spec for fork choice tests, with proposer boosting enabled. -/// -/// This function can be deleted once `ChainSpec::mainnet` enables proposer boosting by default. -pub fn fork_choice_spec<E: EthSpec>(fork_name: ForkName) -> ChainSpec { - let mut spec = testing_spec::<E>(fork_name); - spec.proposer_score_boost = Some(70); - spec + #[allow(clippy::type_complexity)] + pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, AttesterSlashing<E>, PowBlock>>, } impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { @@ -90,8 +87,9 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { .to_str() .expect("path must be valid OsStr") .to_string(); - let spec = &fork_choice_spec::<E>(fork_name); - let steps: Vec<Step<String, String, String>> = yaml_decode_file(&path.join("steps.yaml"))?; + let spec = &testing_spec::<E>(fork_name); + let steps: Vec<Step<String, String, String, String>> = + yaml_decode_file(&path.join("steps.yaml"))?; // Resolve the object names in `steps.yaml` into actual decoded block/attestation objects. 
let steps = steps .into_iter() @@ -113,6 +111,10 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { ssz_decode_file(&path.join(format!("{}.ssz_snappy", attestation))) .map(|attestation| Step::Attestation { attestation }) } + Step::AttesterSlashing { attester_slashing } => { + ssz_decode_file(&path.join(format!("{}.ssz_snappy", attester_slashing))) + .map(|attester_slashing| Step::AttesterSlashing { attester_slashing }) + } Step::PowBlock { pow_block } => { ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) .map(|pow_block| Step::PowBlock { pow_block }) @@ -152,14 +154,7 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { - let tester = Tester::new(self, fork_choice_spec::<E>(fork_name))?; - - // TODO(merge): re-enable this test before production. - // This test is skipped until we can do retrospective confirmations of the terminal - // block after an optimistic sync. - if self.description == "block_lookup_failed" { - return Err(Error::SkippedKnownFailure); - }; + let tester = Tester::new(self, testing_spec::<E>(fork_name))?; for step in &self.steps { match step { @@ -169,6 +164,9 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { tester.process_block(block.clone(), *valid)? 
} Step::Attestation { attestation } => tester.process_attestation(attestation)?, + Step::AttesterSlashing { attester_slashing } => { + tester.process_attester_slashing(attester_slashing) + } Step::PowBlock { pow_block } => tester.process_pow_block(pow_block), Step::Checks { checks } => { let Checks { @@ -179,6 +177,8 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { justified_checkpoint_root, finalized_checkpoint, best_justified_checkpoint, + u_justified_checkpoint, + u_finalized_checkpoint, proposer_boost_root, } = checks.as_ref(); @@ -212,6 +212,14 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> { .check_best_justified_checkpoint(*expected_best_justified_checkpoint)?; } + if let Some(expected_u_justified_checkpoint) = u_justified_checkpoint { + tester.check_u_justified_checkpoint(*expected_u_justified_checkpoint)?; + } + + if let Some(expected_u_finalized_checkpoint) = u_finalized_checkpoint { + tester.check_u_finalized_checkpoint(*expected_u_finalized_checkpoint)?; + } + if let Some(expected_proposer_boost_root) = proposer_boost_root { tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?; } @@ -287,19 +295,19 @@ impl<E: EthSpec> Tester<E> { Ok(self.spec.genesis_slot + slots_since_genesis) } - fn find_head(&self) -> Result<HeadInfo, Error> { + fn block_on_dangerous<F: Future>(&self, future: F) -> Result<F::Output, Error> { self.harness .chain - .fork_choice() - .map_err(|e| Error::InternalError(format!("failed to find head with {:?}", e)))?; - self.harness - .chain - .head_info() - .map_err(|e| Error::InternalError(format!("failed to read head with {:?}", e))) + .task_executor + .clone() + .block_on_dangerous(future, "ef_tests_block_on") + .ok_or_else(|| Error::InternalError("runtime shutdown".into())) } - fn genesis_epoch(&self) -> Epoch { - self.spec.genesis_slot.epoch(E::slots_per_epoch()) + fn find_head(&self) -> Result<CachedHead<E>, Error> { + let chain = self.harness.chain.clone(); + 
self.block_on_dangerous(chain.recompute_head_at_current_slot())?; + Ok(self.harness.chain.canonical_head.cached_head()) } pub fn set_tick(&self, tick: u64) { @@ -314,15 +322,20 @@ impl<E: EthSpec> Tester<E> { self.harness .chain - .fork_choice - .write() - .update_time(slot) + .canonical_head + .fork_choice_write_lock() + .update_time(slot, &self.spec) .unwrap(); } pub fn process_block(&self, block: SignedBeaconBlock<E>, valid: bool) -> Result<(), Error> { - let result = self.harness.chain.process_block(block.clone()); let block_root = block.canonical_root(); + let block = Arc::new(block); + let result = self.block_on_dangerous( + self.harness + .chain + .process_block(block.clone(), CountUnrealized::False), + )?; if result.is_ok() != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. result: {:?}", @@ -367,16 +380,21 @@ impl<E: EthSpec> Tester<E> { .seconds_from_current_slot_start(self.spec.seconds_per_slot) .unwrap(); - let (block, _) = block.deconstruct(); - let result = self.harness.chain.fork_choice.write().on_block( - self.harness.chain.slot().unwrap(), - &block, - block_root, - block_delay, - &state, - PayloadVerificationStatus::Irrelevant, - &self.harness.chain.spec, - ); + let result = self + .harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_block( + self.harness.chain.slot().unwrap(), + block.message(), + block_root, + block_delay, + &state, + PayloadVerificationStatus::Irrelevant, + &self.harness.chain.spec, + self.harness.chain.config.count_unrealized.into(), + ); if result.is_ok() { return Err(Error::DidntFail(format!( @@ -408,6 +426,14 @@ impl<E: EthSpec> Tester<E> { .map_err(|e| Error::InternalError(format!("attestation import failed with {:?}", e))) } + pub fn process_attester_slashing(&self, attester_slashing: &AttesterSlashing<E>) { + self.harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_attester_slashing(attester_slashing) + } + pub fn 
process_pow_block(&self, pow_block: &PowBlock) { let el = self.harness.mock_execution_layer.as_ref().unwrap(); @@ -424,10 +450,11 @@ impl<E: EthSpec> Tester<E> { } pub fn check_head(&self, expected_head: Head) -> Result<(), Error> { - let chain_head = self.find_head().map(|head| Head { - slot: head.slot, - root: head.block_root, - })?; + let head = self.find_head()?; + let chain_head = Head { + slot: head.head_slot(), + root: head.head_block_root(), + }; check_equal("head", chain_head, expected_head) } @@ -446,15 +473,15 @@ impl<E: EthSpec> Tester<E> { } pub fn check_justified_checkpoint(&self, expected_checkpoint: Checkpoint) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.current_justified_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().justified_checkpoint(); + let head_checkpoint = self.find_head()?.justified_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .justified_checkpoint(); - assert_checkpoints_eq( - "justified_checkpoint", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + assert_checkpoints_eq("justified_checkpoint", head_checkpoint, fc_checkpoint); check_equal("justified_checkpoint", fc_checkpoint, expected_checkpoint) } @@ -463,15 +490,15 @@ impl<E: EthSpec> Tester<E> { &self, expected_checkpoint_root: Hash256, ) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.current_justified_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().justified_checkpoint(); + let head_checkpoint = self.find_head()?.justified_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .justified_checkpoint(); - assert_checkpoints_eq( - "justified_checkpoint_root", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + assert_checkpoints_eq("justified_checkpoint_root", head_checkpoint, fc_checkpoint); check_equal( "justified_checkpoint_root", @@ -481,15 +508,15 @@ 
impl<E: EthSpec> Tester<E> { } pub fn check_finalized_checkpoint(&self, expected_checkpoint: Checkpoint) -> Result<(), Error> { - let head_checkpoint = self.find_head()?.finalized_checkpoint; - let fc_checkpoint = self.harness.chain.fork_choice.read().finalized_checkpoint(); + let head_checkpoint = self.find_head()?.finalized_checkpoint(); + let fc_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .finalized_checkpoint(); - assert_checkpoints_eq( - "finalized_checkpoint", - self.genesis_epoch(), - head_checkpoint, - fc_checkpoint, - ); + assert_checkpoints_eq("finalized_checkpoint", head_checkpoint, fc_checkpoint); check_equal("finalized_checkpoint", fc_checkpoint, expected_checkpoint) } @@ -501,8 +528,8 @@ impl<E: EthSpec> Tester<E> { let best_justified_checkpoint = self .harness .chain - .fork_choice - .read() + .canonical_head + .fork_choice_read_lock() .best_justified_checkpoint(); check_equal( "best_justified_checkpoint", @@ -511,11 +538,50 @@ impl<E: EthSpec> Tester<E> { ) } + pub fn check_u_justified_checkpoint( + &self, + expected_checkpoint: Checkpoint, + ) -> Result<(), Error> { + let u_justified_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .unrealized_justified_checkpoint(); + check_equal( + "u_justified_checkpoint", + u_justified_checkpoint, + expected_checkpoint, + ) + } + + pub fn check_u_finalized_checkpoint( + &self, + expected_checkpoint: Checkpoint, + ) -> Result<(), Error> { + let u_finalized_checkpoint = self + .harness + .chain + .canonical_head + .fork_choice_read_lock() + .unrealized_finalized_checkpoint(); + check_equal( + "u_finalized_checkpoint", + u_finalized_checkpoint, + expected_checkpoint, + ) + } + pub fn check_expected_proposer_boost_root( &self, expected_proposer_boost_root: Hash256, ) -> Result<(), Error> { - let proposer_boost_root = self.harness.chain.fork_choice.read().proposer_boost_root(); + let proposer_boost_root = self + .harness + .chain + 
.canonical_head + .fork_choice_read_lock() + .proposer_boost_root(); check_equal( "proposer_boost_root", proposer_boost_root, @@ -530,20 +596,8 @@ impl<E: EthSpec> Tester<E> { /// This function is necessary due to a quirk documented in this issue: /// /// https://github.com/ethereum/consensus-specs/issues/2566 -fn assert_checkpoints_eq(name: &str, genesis_epoch: Epoch, head: Checkpoint, fc: Checkpoint) { - if fc.epoch == genesis_epoch { - assert_eq!( - head, - Checkpoint { - epoch: genesis_epoch, - root: Hash256::zero() - }, - "{} (genesis)", - name - ) - } else { - assert_eq!(head, fc, "{} (non-genesis)", name) - } +fn assert_checkpoints_eq(name: &str, head: Checkpoint, fc: Checkpoint) { + assert_eq!(head, fc, "{}", name) } /// Convenience function to create `Error` messages. diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index e2b43e5c78..83e0702993 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -20,8 +20,9 @@ use state_processing::{ use std::fmt::Debug; use std::path::Path; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, ForkName, - FullPayload, ProposerSlashing, SignedVoluntaryExit, SyncAggregate, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit, + SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -260,6 +261,40 @@ impl<E: EthSpec> Operation<E> for FullPayload<E> { } } } +impl<E: EthSpec> Operation<E> for BlindedPayload<E> { + fn handler_name() -> String { + "execution_payload".into() + } + + fn filename() -> String { + "execution_payload.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair + } + + fn decode(path: &Path, _spec: &ChainSpec) -> Result<Self, Error> { 
+ ssz_decode_file::<ExecutionPayload<E>>(path).map(Into::into) + } + + fn apply_to( + &self, + state: &mut BeaconState<E>, + spec: &ChainSpec, + extra: &Operations<E, Self>, + ) -> Result<(), BlockProcessingError> { + let valid = extra + .execution_metadata + .as_ref() + .map_or(false, |e| e.execution_valid); + if valid { + process_execution_payload(state, self, spec) + } else { + Err(BlockProcessingError::ExecutionInvalid) + } + } +} impl<E: EthSpec, O: Operation<E>> LoadCase for Operations<E, O> { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result<Self, Error> { diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index be6c495aae..13c0a8c54a 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -30,6 +30,10 @@ pub trait Handler { } } + fn use_rayon() -> bool { + true + } + fn run_for_fork(&self, fork_name: ForkName) { let fork_name_str = fork_name.to_string(); @@ -48,7 +52,7 @@ pub trait Handler { .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) }; let test_cases = fs::read_dir(&handler_path) - .expect("handler dir exists") + .unwrap_or_else(|e| panic!("handler dir {} exists: {:?}", handler_path.display(), e)) .filter_map(as_directory) .flat_map(|suite| fs::read_dir(suite.path()).expect("suite dir exists")) .filter_map(as_directory) @@ -59,7 +63,7 @@ pub trait Handler { }) .collect(); - let results = Cases { test_cases }.test_results(fork_name); + let results = Cases { test_cases }.test_results(fork_name, Self::use_rayon()); let name = format!( "{}/{}/{}", @@ -460,6 +464,11 @@ impl<E: EthSpec + TypeName> Handler for ForkChoiceHandler<E> { self.handler_name.clone() } + fn use_rayon() -> bool { + // The fork choice tests use `block_on` which can cause panics with rayon. + false + } + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Merge block tests are only enabled for Bellatrix or later. 
if self.handler_name == "on_merge_block" diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 540fe6903e..c075e89b3f 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -56,6 +56,7 @@ type_name!(Eth1Data); type_name_generic!(ExecutionPayload); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); +type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); type_name_generic!(HistoricalBatch); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 8b415acc80..69b77dd228 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -71,11 +71,17 @@ fn operations_sync_aggregate() { } #[test] -fn operations_execution_payload() { +fn operations_execution_payload_full() { OperationsHandler::<MinimalEthSpec, FullPayload<_>>::default().run(); OperationsHandler::<MainnetEthSpec, FullPayload<_>>::default().run(); } +#[test] +fn operations_execution_payload_blinded() { + OperationsHandler::<MinimalEthSpec, BlindedPayload<_>>::default().run(); + OperationsHandler::<MainnetEthSpec, BlindedPayload<_>>::default().run(); +} + #[test] fn sanity_blocks() { SanityBlocksHandler::<MinimalEthSpec>::default().run(); @@ -379,8 +385,9 @@ fn epoch_processing_participation_record_updates() { #[test] fn epoch_processing_sync_committee_updates() { + // There are presently no mainnet tests, see: + // https://github.com/ethereum/consensus-spec-tests/issues/29 EpochProcessingHandler::<MinimalEthSpec, SyncCommitteeUpdates>::default().run(); - EpochProcessingHandler::<MainnetEthSpec, SyncCommitteeUpdates>::default().run(); } #[test] diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs index c82277dc75..d8df3fd8ae 100644 --- a/testing/eth1_test_rig/src/ganache.rs +++ b/testing/eth1_test_rig/src/ganache.rs @@ -16,17 +16,11 @@ pub struct GanacheInstance { 
pub port: u16, child: Child, pub web3: Web3<Http>, - network_id: u64, chain_id: u64, } impl GanacheInstance { - fn new_from_child( - mut child: Child, - port: u16, - network_id: u64, - chain_id: u64, - ) -> Result<Self, String> { + fn new_from_child(mut child: Child, port: u16, chain_id: u64) -> Result<Self, String> { let stdout = child .stdout .ok_or("Unable to get stdout for ganache child process")?; @@ -64,14 +58,13 @@ impl GanacheInstance { port, child, web3, - network_id, chain_id, }) } /// Start a new `ganache` process, waiting until it indicates that it is ready to accept /// RPC connections. - pub fn new(network_id: u64, chain_id: u64) -> Result<Self, String> { + pub fn new(chain_id: u64) -> Result<Self, String> { let port = unused_tcp_port()?; let binary = match cfg!(windows) { true => "ganache.cmd", @@ -89,8 +82,6 @@ impl GanacheInstance { .arg(format!("{}", port)) .arg("--mnemonic") .arg("\"vast thought differ pull jewel broom cook wrist tribe word before omit\"") - .arg("--networkId") - .arg(format!("{}", network_id)) .arg("--chain.chainId") .arg(format!("{}", chain_id)) .spawn() @@ -102,7 +93,7 @@ impl GanacheInstance { ) })?; - Self::new_from_child(child, port, network_id, chain_id) + Self::new_from_child(child, port, chain_id) } pub fn fork(&self) -> Result<Self, String> { @@ -128,7 +119,7 @@ impl GanacheInstance { ) })?; - Self::new_from_child(child, port, self.network_id, self.chain_id) + Self::new_from_child(child, port, self.chain_id) } /// Returns the endpoint that this instance is listening on. 
@@ -136,11 +127,6 @@ impl GanacheInstance { endpoint(self.port) } - /// Returns the network id of the ganache instance - pub fn network_id(&self) -> u64 { - self.network_id - } - /// Returns the chain id of the ganache instance pub fn chain_id(&self) -> u64 { self.chain_id @@ -180,7 +166,7 @@ impl GanacheInstance { } fn endpoint(port: u16) -> String { - format!("http://localhost:{}", port) + format!("http://127.0.0.1:{}", port) } impl Drop for GanacheInstance { diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 52ae3922bc..42081a60e7 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -30,8 +30,8 @@ pub struct GanacheEth1Instance { } impl GanacheEth1Instance { - pub async fn new(network_id: u64, chain_id: u64) -> Result<Self, String> { - let ganache = GanacheInstance::new(network_id, chain_id)?; + pub async fn new(chain_id: u64) -> Result<Self, String> { + let ganache = GanacheInstance::new(chain_id)?; DepositContract::deploy(ganache.web3.clone(), 0, None) .await .map(|deposit_contract| Self { diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index fc8230c7a2..7a8d7e99b5 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -15,3 +15,9 @@ execution_layer = { path = "../../beacon_node/execution_layer" } sensitive_url = { path = "../../common/sensitive_url" } types = { path = "../../consensus/types" } unused_port = { path = "../../common/unused_port" } +ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +ethers-providers = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } +deposit_contract = { path = "../../common/deposit_contract" } +reqwest = { version = "0.11.0", features = ["json"] } +hex = "0.4.2" +fork_choice = { path = "../../consensus/fork_choice" } diff 
--git a/testing/execution_engine_integration/src/build_utils.rs b/testing/execution_engine_integration/src/build_utils.rs index 4d4a7bf1ce..15e7fdc0f1 100644 --- a/testing/execution_engine_integration/src/build_utils.rs +++ b/testing/execution_engine_integration/src/build_utils.rs @@ -15,51 +15,115 @@ pub fn prepare_dir() -> PathBuf { execution_clients_dir } -pub fn clone_repo(repo_dir: &Path, repo_url: &str) -> bool { - Command::new("git") - .arg("clone") - .arg(repo_url) - .arg("--recursive") - .current_dir(repo_dir) - .output() - .unwrap_or_else(|_| panic!("failed to clone repo at {}", repo_url)) - .status - .success() +pub fn clone_repo(repo_dir: &Path, repo_url: &str) -> Result<(), String> { + output_to_result( + Command::new("git") + .arg("clone") + .arg(repo_url) + .current_dir(repo_dir) + .output() + .map_err(|_| format!("failed to clone repo at {repo_url}"))?, + |_| {}, + ) } -pub fn checkout_branch(repo_dir: &Path, branch_name: &str) -> bool { - Command::new("git") - .arg("checkout") - .arg(branch_name) - .current_dir(repo_dir) - .output() - .unwrap_or_else(|_| { - panic!( - "failed to checkout branch at {:?}/{}", - repo_dir, branch_name, - ) - }) - .status - .success() +pub fn checkout(repo_dir: &Path, revision_or_branch: &str) -> Result<(), String> { + output_to_result( + Command::new("git") + .arg("checkout") + .arg(revision_or_branch) + .current_dir(repo_dir) + .output() + .map_err(|_| { + format!( + "failed to checkout branch or revision at {repo_dir:?}/{revision_or_branch}", + ) + })?, + |_| {}, + )?; + output_to_result( + Command::new("git") + .arg("submodule") + .arg("update") + .arg("--init") + .arg("--recursive") + .current_dir(repo_dir) + .output() + .map_err(|_| { + format!( + "failed to update submodules on branch or revision at {repo_dir:?}/{revision_or_branch}", + ) + })?, + |_| {}, + ) } -pub fn update_branch(repo_dir: &Path, branch_name: &str) -> bool { - Command::new("git") - .arg("pull") - .current_dir(repo_dir) - .output() - 
.unwrap_or_else(|_| panic!("failed to update branch at {:?}/{}", repo_dir, branch_name)) - .status - .success() +/// Gets the last annotated tag of the given repo. +pub fn get_latest_release(repo_dir: &Path, branch_name: &str) -> Result<String, String> { + // If the directory was already present it is possible we don't have the most recent tags. + // Fetch them + output_to_result( + Command::new("git") + .arg("fetch") + .arg("--tags") + .current_dir(repo_dir) + .output() + .map_err(|e| format!("Failed to fetch tags for {repo_dir:?}: Err: {e}"))?, + |_| {}, + )?; + output_to_result( + Command::new("git") + .arg("describe") + .arg(format!("origin/{branch_name}")) + .arg("--abbrev=0") + .arg("--tags") + .current_dir(repo_dir) + .output() + .map_err(|e| format!("Failed to get latest tag for {repo_dir:?}: Err: {e}"))?, + |stdout| { + let tag = String::from_utf8_lossy(&stdout); + tag.trim().to_string() + }, + ) } -pub fn check_command_output(output: Output, failure_msg: &'static str) { +#[allow(dead_code)] +pub fn update_branch(repo_dir: &Path, branch_name: &str) -> Result<(), String> { + output_to_result( + Command::new("git") + .arg("pull") + .current_dir(repo_dir) + .output() + .map_err(|_| format!("failed to update branch at {:?}/{}", repo_dir, branch_name))?, + |_| {}, + ) +} + +/// Checks the status of the [`std::process::Output`] and applies `f` to `stdout` if the process +succeeded. If not, builds a readable error containing stdout and stderr. 
+fn output_to_result<OnSuccessFn, T>(output: Output, f: OnSuccessFn) -> Result<T, String> +where + OnSuccessFn: Fn(Vec<u8>) -> T, +{ + if !output.status.success() { + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + Err(format!("stderr: {stderr}\nstdout: {stdout}")) + } else { + Ok(f(output.stdout)) + } +} + +pub fn check_command_output<F>(output: Output, failure_msg: F) +where + F: Fn() -> String, +{ if !output.status.success() { if !SUPPRESS_LOGS { dbg!(String::from_utf8_lossy(&output.stdout)); dbg!(String::from_utf8_lossy(&output.stderr)); } - panic!("{}", failure_msg); + panic!("{}", failure_msg()); } } diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index dd5d03be89..ad5af53158 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -1,3 +1,4 @@ +use ethers_providers::{Http, Provider}; use execution_layer::DEFAULT_JWT_FILE; use sensitive_url::SensitiveUrl; use std::path::PathBuf; @@ -5,6 +6,14 @@ use std::process::Child; use tempfile::TempDir; use unused_port::unused_tcp_port; +pub const KEYSTORE_PASSWORD: &str = "testpwd"; +pub const ACCOUNT1: &str = "7b8C3a386C0eea54693fFB0DA17373ffC9228139"; +pub const ACCOUNT2: &str = "dA2DD7560DB7e212B945fC72cEB54B7D8C886D77"; +pub const PRIVATE_KEYS: [&str; 2] = [ + "115fe42a60e5ef45f5490e599add1f03c73aeaca129c2c41451eca6cf8ff9e04", + "6a692e710077d9000be1326acbe32f777b403902ac8779b19eb1398b849c99c3", +]; + /// Defined for each EE type (e.g., Geth, Nethermind, etc). 
pub trait GenericExecutionEngine: Clone { fn init_datadir() -> TempDir; @@ -25,6 +34,7 @@ pub struct ExecutionEngine<E> { http_port: u16, http_auth_port: u16, child: Child, + pub provider: Provider<Http>, } impl<E> Drop for ExecutionEngine<E> { @@ -43,23 +53,26 @@ impl<E: GenericExecutionEngine> ExecutionEngine<E> { let http_port = unused_tcp_port().unwrap(); let http_auth_port = unused_tcp_port().unwrap(); let child = E::start_client(&datadir, http_port, http_auth_port, jwt_secret_path); + let provider = Provider::<Http>::try_from(format!("http://localhost:{}", http_port)) + .expect("failed to instantiate ethers provider"); Self { engine, datadir, http_port, http_auth_port, child, + provider, } } - pub fn http_url(&self) -> SensitiveUrl { - SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() - } - pub fn http_auth_url(&self) -> SensitiveUrl { SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_auth_port)).unwrap() } + pub fn http_url(&self) -> SensitiveUrl { + SensitiveUrl::parse(&format!("http://127.0.0.1:{}", self.http_port)).unwrap() + } + pub fn datadir(&self) -> PathBuf { self.datadir.path().to_path_buf() } diff --git a/testing/execution_engine_integration/src/genesis_json.rs b/testing/execution_engine_integration/src/genesis_json.rs index c0b94e22e8..17654b292a 100644 --- a/testing/execution_engine_integration/src/genesis_json.rs +++ b/testing/execution_engine_integration/src/genesis_json.rs @@ -32,7 +32,12 @@ pub fn geth_genesis_json() -> Value { "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", "coinbase":"0x0000000000000000000000000000000000000000", "alloc":{ - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b":{"balance":"0x6d6172697573766477000000"} + "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { + "balance": "10000000000000000000000000" + }, + "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { + "balance": "10000000000000000000000000" + }, }, "number":"0x0", "gasUsed":"0x0", @@ -41,76 +46,86 @@ 
pub fn geth_genesis_json() -> Value { }) } -/// Sourced from: -/// -/// https://github.com/NethermindEth/nethermind/blob/kiln/src/Nethermind/Chains/themerge_kiln_testvectors.json +/// Modified kiln config pub fn nethermind_genesis_json() -> Value { - json!({ - "name": "TheMerge_Devnet", - "engine": { - "clique": { + json!( + { + "name": "lighthouse_test_network", + "engine": { + "Ethash": { "params": { - "period": 5, - "epoch": 30000 - } - } - }, - "params": { - "gasLimitBoundDivisor": "0x400", - "accountStartNonce": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID": 1, - "eip150Transition": "0x0", - "eip155Transition": "0x0", - "eip158Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip140Transition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip145Transition": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip1283Transition": "0x0", - "eip1283DisableTransition": "0x0", - "eip152Transition": "0x0", - "eip1108Transition": "0x0", - "eip1344Transition": "0x0", - "eip1884Transition": "0x0", - "eip2028Transition": "0x0", - "eip2200Transition": "0x0", - "eip2565Transition": "0x0", - "eip2929Transition": "0x0", - "eip2930Transition": "0x0", - "eip1559Transition": "0x0", - "eip3198Transition": "0x0", - "eip3529Transition": "0x0", - "eip3541Transition": "0x0" - }, - "genesis": { - "seal": { - "ethereum": { - "nonce": "0x42", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + "minimumDifficulty": "0x20000", + "difficultyBoundDivisor": "0x800", + "durationLimit": "0xd", + "blockReward": { + "0x0": "0x1BC16D674EC80000" + }, + "homesteadTransition": "0x0", + "eip100bTransition": "0x0", + "difficultyBombDelays": {} } + } }, - "difficulty": "0x400000000", - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x0", - "parentHash": 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData":"0x0000000000000000000000000000000000000000000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit":"0x1C9C380", - "author": "0x0000000000000000000000000000000000000000", - "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas":"0x7" - }, - "accounts": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance":"0x6d6172697573766477000000" - } - } - }) + "params": { + "gasLimitBoundDivisor": "0x400", + "registrar": "0x0000000000000000000000000000000000000000", + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID": "0x1469ca", + "MergeForkIdTransition": "0x3e8", + "eip150Transition": "0x0", + "eip158Transition": "0x0", + "eip160Transition": "0x0", + "eip161abcTransition": "0x0", + "eip161dTransition": "0x0", + "eip155Transition": "0x0", + "eip140Transition": "0x0", + "eip211Transition": "0x0", + "eip214Transition": "0x0", + "eip658Transition": "0x0", + "eip145Transition": "0x0", + "eip1014Transition": "0x0", + "eip1052Transition": "0x0", + "eip1283Transition": "0x0", + "eip1283DisableTransition": "0x0", + "eip152Transition": "0x0", + "eip1108Transition": "0x0", + "eip1344Transition": "0x0", + "eip1884Transition": "0x0", + "eip2028Transition": "0x0", + "eip2200Transition": "0x0", + "eip2565Transition": "0x0", + "eip2929Transition": "0x0", + "eip2930Transition": "0x0", + "eip1559Transition": "0x0", + "eip3198Transition": "0x0", + "eip3529Transition": "0x0", + "eip3541Transition": "0x0" + }, + "genesis": { + "seal": { + "ethereum": { + "nonce": "0x1234", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty": "0x01", + "author": "0x0000000000000000000000000000000000000000", + 
"timestamp": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "", + "gasLimit": "0x1C9C380" + }, + "accounts": { + "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { + "balance": "10000000000000000000000000" + }, + "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { + "balance": "10000000000000000000000000" + }, + }, + "nodes": [] + } + ) } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 7a6a3803e6..467fd8b430 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -23,20 +23,17 @@ pub fn build(execution_clients_dir: &Path) { if !repo_dir.exists() { // Clone the repo - assert!(build_utils::clone_repo( - execution_clients_dir, - GETH_REPO_URL - )); + build_utils::clone_repo(execution_clients_dir, GETH_REPO_URL).unwrap(); } - // Checkout the correct branch - assert!(build_utils::checkout_branch(&repo_dir, GETH_BRANCH)); - - // Update the branch - assert!(build_utils::update_branch(&repo_dir, GETH_BRANCH)); + // Get the latest tag on the branch + let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth - build_utils::check_command_output(build_result(&repo_dir), "make failed"); + build_utils::check_command_output(build_result(&repo_dir), || { + format!("geth make failed using release {last_release}") + }); } /* @@ -75,7 +72,7 @@ impl GenericExecutionEngine for GethEngine { .output() .expect("failed to init geth"); - build_utils::check_command_output(output, "geth init failed"); + build_utils::check_command_output(output, || "geth init failed".into()); datadir } @@ -93,15 +90,21 @@ impl GenericExecutionEngine for GethEngine { .arg(datadir.path().to_str().unwrap()) .arg("--http") .arg("--http.api") - .arg("engine,eth") + .arg("engine,eth,personal") .arg("--http.port") 
.arg(http_port.to_string()) .arg("--authrpc.port") .arg(http_auth_port.to_string()) .arg("--port") .arg(network_port.to_string()) + .arg("--allow-insecure-unlock") .arg("--authrpc.jwtsecret") .arg(jwt_secret_path.as_path().to_str().unwrap()) + // This flag is required to help Geth perform reliably when feeding it blocks + // one-by-one. For more information, see: + // + // https://github.com/sigp/lighthouse/pull/3382#issuecomment-1197680345 + .arg("--syncmode=full") .stdout(build_utils::build_stdio()) .stderr(build_utils::build_stdio()) .spawn() diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index 30c8132b7c..bd3436602c 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -1,3 +1,4 @@ +#![recursion_limit = "1024"] /// This binary runs integration tests between Lighthouse and execution engines. /// /// It will first attempt to build any supported integration clients, then it will run tests. @@ -9,13 +10,14 @@ mod genesis_json; mod geth; mod nethermind; mod test_rig; +mod transactions; use geth::GethEngine; use nethermind::NethermindEngine; use test_rig::TestRig; /// Set to `false` to send logs to the console during tests. Logs are useful when debugging. 
-const SUPPRESS_LOGS: bool = true; +const SUPPRESS_LOGS: bool = false; fn main() { if cfg!(windows) { diff --git a/testing/execution_engine_integration/src/nethermind.rs b/testing/execution_engine_integration/src/nethermind.rs index 833409c69e..1fe7bf0f05 100644 --- a/testing/execution_engine_integration/src/nethermind.rs +++ b/testing/execution_engine_integration/src/nethermind.rs @@ -1,13 +1,14 @@ use crate::build_utils; use crate::execution_engine::GenericExecutionEngine; use crate::genesis_json::nethermind_genesis_json; +use std::env; +use std::fs::File; use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output}; -use std::{env, fs::File}; use tempfile::TempDir; use unused_port::unused_tcp_port; -const NETHERMIND_BRANCH: &str = "kiln"; +const NETHERMIND_BRANCH: &str = "master"; const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind"; fn build_result(repo_dir: &Path) -> Output { @@ -26,24 +27,23 @@ pub fn build(execution_clients_dir: &Path) { if !repo_dir.exists() { // Clone the repo - assert!(build_utils::clone_repo( - execution_clients_dir, - NETHERMIND_REPO_URL - )); + build_utils::clone_repo(execution_clients_dir, NETHERMIND_REPO_URL).unwrap() } - // Checkout the correct branch - assert!(build_utils::checkout_branch(&repo_dir, NETHERMIND_BRANCH)); - - // Update the branch - assert!(build_utils::update_branch(&repo_dir, NETHERMIND_BRANCH)); + // Get the latest tag + let last_release = build_utils::get_latest_release(&repo_dir, NETHERMIND_BRANCH).unwrap(); + build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build nethermind - build_utils::check_command_output(build_result(&repo_dir), "dotnet build failed"); + build_utils::check_command_output(build_result(&repo_dir), || { + format!("nethermind build failed using release {last_release}") + }); // Build nethermind a second time to enable Merge-related features. // Not sure why this is necessary. 
- build_utils::check_command_output(build_result(&repo_dir), "dotnet build failed"); + build_utils::check_command_output(build_result(&repo_dir), || { + format!("nethermind build failed using release {last_release}") + }); } /* @@ -72,12 +72,10 @@ impl NethermindEngine { impl GenericExecutionEngine for NethermindEngine { fn init_datadir() -> TempDir { let datadir = TempDir::new().unwrap(); - let genesis_json_path = datadir.path().join("genesis.json"); let mut file = File::create(&genesis_json_path).unwrap(); let json = nethermind_genesis_json(); serde_json::to_writer(&mut file, &json).unwrap(); - datadir } @@ -94,15 +92,22 @@ impl GenericExecutionEngine for NethermindEngine { .arg("--datadir") .arg(datadir.path().to_str().unwrap()) .arg("--config") - .arg("themerge_kiln_testvectors") + .arg("kiln") .arg("--Init.ChainSpecPath") .arg(genesis_json_path.to_str().unwrap()) - .arg("--JsonRpc.AdditionalRpcUrls") - .arg(format!("http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client|no-auth,http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client", http_port, http_auth_port)) + .arg("--Merge.TerminalTotalDifficulty") + .arg("0") + .arg("--JsonRpc.Enabled") + .arg("true") .arg("--JsonRpc.EnabledModules") - .arg("net,eth,subscribe,web3,admin,engine") + .arg("net,eth,subscribe,web3,admin,personal") .arg("--JsonRpc.Port") .arg(http_port.to_string()) + .arg("--JsonRpc.AdditionalRpcUrls") + .arg(format!( + "http://localhost:{}|http;ws|net;eth;subscribe;engine;web3;client", + http_auth_port + )) .arg("--Network.DiscoveryPort") .arg(network_port.to_string()) .arg("--Network.P2PPort") diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 79661354de..0aa960bc41 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -1,19 +1,28 @@ -use crate::execution_engine::{ExecutionEngine, GenericExecutionEngine}; -use 
execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; +use crate::execution_engine::{ + ExecutionEngine, GenericExecutionEngine, ACCOUNT1, ACCOUNT2, KEYSTORE_PASSWORD, PRIVATE_KEYS, +}; +use crate::transactions::transactions; +use ethers_providers::Middleware; +use execution_layer::{ + BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadStatus, +}; +use fork_choice::ForkchoiceUpdateParameters; +use reqwest::{header::CONTENT_TYPE, Client}; +use sensitive_url::SensitiveUrl; +use serde_json::{json, Value}; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use task_executor::TaskExecutor; use tokio::time::sleep; use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - MainnetEthSpec, Slot, Uint256, + MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; +const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); -const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(10); - -struct ExecutionPair<E> { +struct ExecutionPair<E, T: EthSpec> { /// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP. - execution_layer: ExecutionLayer, + execution_layer: ExecutionLayer<T>, /// A handle to external EE process, once this is dropped the process will be killed. #[allow(dead_code)] execution_engine: ExecutionEngine<E>, @@ -23,15 +32,72 @@ struct ExecutionPair<E> { /// /// There are two EEs held here so that we can test out-of-order application of payloads, and other /// edge-cases. -pub struct TestRig<E> { +pub struct TestRig<E, T: EthSpec = MainnetEthSpec> { #[allow(dead_code)] runtime: Arc<tokio::runtime::Runtime>, - ee_a: ExecutionPair<E>, - ee_b: ExecutionPair<E>, + ee_a: ExecutionPair<E, T>, + ee_b: ExecutionPair<E, T>, spec: ChainSpec, _runtime_shutdown: exit_future::Signal, } +/// Import a private key into the execution engine and unlock it so that we can +/// make transactions with the corresponding account. 
+async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: &str) { + for priv_key in priv_keys { + let body = json!( + { + "jsonrpc":"2.0", + "method":"personal_importRawKey", + "params":[priv_key, password], + "id":1 + } + ); + + let client = Client::builder().build().unwrap(); + let request = client + .post(http_url.full.clone()) + .header(CONTENT_TYPE, "application/json") + .json(&body); + + let response: Value = request + .send() + .await + .unwrap() + .error_for_status() + .unwrap() + .json() + .await + .unwrap(); + + let account = response.get("result").unwrap().as_str().unwrap(); + + let body = json!( + { + "jsonrpc":"2.0", + "method":"personal_unlockAccount", + "params":[account, password], + "id":1 + } + ); + + let request = client + .post(http_url.full.clone()) + .header(CONTENT_TYPE, "application/json") + .json(&body); + + let _response: Value = request + .send() + .await + .unwrap() + .error_for_status() + .unwrap() + .json() + .await + .unwrap(); + } +} + impl<E: GenericExecutionEngine> TestRig<E> { pub fn new(generic_engine: E) -> Self { let log = environment::null_logger().unwrap(); @@ -68,7 +134,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { let ee_b = { let execution_engine = ExecutionEngine::new(generic_engine); - let urls = vec![execution_engine.http_url()]; + let urls = vec![execution_engine.http_auth_url()]; let config = execution_layer::Config { execution_endpoints: urls, @@ -98,10 +164,9 @@ impl<E: GenericExecutionEngine> TestRig<E> { } pub fn perform_tests_blocking(&self) { - self.ee_a - .execution_layer - .block_on_generic(|_| async { self.perform_tests().await }) - .unwrap() + self.runtime + .handle() + .block_on(async { self.perform_tests().await }); } pub async fn wait_until_synced(&self) { @@ -126,6 +191,20 @@ impl<E: GenericExecutionEngine> TestRig<E> { pub async fn perform_tests(&self) { self.wait_until_synced().await; + // Import and unlock all private keys to sign transactions + let _ = 
futures::future::join_all([&self.ee_a, &self.ee_b].iter().map(|ee| { + import_and_unlock( + ee.execution_engine.http_url(), + &PRIVATE_KEYS, + KEYSTORE_PASSWORD, + ) + })) + .await; + + // We hardcode the accounts here since some EEs start with a default unlocked account + let account1 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT1).unwrap()); + let account2 = ethers_core::types::Address::from_slice(&hex::decode(&ACCOUNT2).unwrap()); + /* * Check the transition config endpoint. */ @@ -143,7 +222,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { let terminal_pow_block_hash = self .ee_a .execution_layer - .get_terminal_pow_block_hash(&self.spec) + .get_terminal_pow_block_hash(&self.spec, timestamp_now()) .await .unwrap() .unwrap(); @@ -152,12 +231,23 @@ impl<E: GenericExecutionEngine> TestRig<E> { terminal_pow_block_hash, self.ee_b .execution_layer - .get_terminal_pow_block_hash(&self.spec) + .get_terminal_pow_block_hash(&self.spec, timestamp_now()) .await .unwrap() .unwrap() ); + // Submit transactions before getting payload + let txs = transactions::<MainnetEthSpec>(account1, account2); + for tx in txs.clone().into_iter() { + self.ee_a + .execution_engine + .provider + .send_transaction(tx, None) + .await + .unwrap(); + } + /* * Execution Engine A: * @@ -167,22 +257,79 @@ impl<E: GenericExecutionEngine> TestRig<E> { let parent_hash = terminal_pow_block_hash; let timestamp = timestamp_now(); let prev_randao = Hash256::zero(); + let head_root = Hash256::zero(); + let justified_block_hash = ExecutionBlockHash::zero(); let finalized_block_hash = ExecutionBlockHash::zero(); + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root, + head_hash: Some(parent_hash), + justified_hash: Some(justified_block_hash), + finalized_hash: Some(finalized_block_hash), + }; let proposer_index = 0; + + let prepared = self + .ee_a + .execution_layer + .insert_proposer( + Slot::new(1), // Insert proposer for the next slot + head_root, + proposer_index, + 
PayloadAttributes { + timestamp, + prev_randao, + suggested_fee_recipient: Address::zero(), + }, + ) + .await; + + assert!(!prepared, "Inserting proposer for the first time"); + + // Make a fcu call with the PayloadAttributes that we inserted previously + let prepare = self + .ee_a + .execution_layer + .notify_forkchoice_updated( + parent_hash, + justified_block_hash, + finalized_block_hash, + Slot::new(0), + Hash256::zero(), + ) + .await + .unwrap(); + + assert_eq!(prepare, PayloadStatus::Valid); + + // Add a delay to give the EE sufficient time to pack the + // submitted transactions into a payload. + // This is required when running on under resourced nodes and + // in CI. + sleep(Duration::from_secs(3)).await; + + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot: Slot::new(0), + chain_health: ChainHealth::Healthy, + }; let valid_payload = self .ee_a .execution_layer - .get_payload::<MainnetEthSpec, FullPayload<MainnetEthSpec>>( + .get_payload::<FullPayload<MainnetEthSpec>>( parent_hash, timestamp, prev_randao, - finalized_block_hash, proposer_index, + forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() .execution_payload; + assert_eq!(valid_payload.transactions.len(), txs.len()); + /* * Execution Engine A: * @@ -196,7 +343,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_a .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Syncing); @@ -230,7 +383,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_a .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + 
) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -260,17 +419,23 @@ impl<E: GenericExecutionEngine> TestRig<E> { let parent_hash = valid_payload.block_hash; let timestamp = valid_payload.timestamp + 1; let prev_randao = Hash256::zero(); - let finalized_block_hash = ExecutionBlockHash::zero(); let proposer_index = 0; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot: Slot::new(0), + chain_health: ChainHealth::Healthy, + }; let second_payload = self .ee_a .execution_layer - .get_payload::<MainnetEthSpec, FullPayload<MainnetEthSpec>>( + .get_payload::<FullPayload<MainnetEthSpec>>( parent_hash, timestamp, prev_randao, - finalized_block_hash, proposer_index, + forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() @@ -313,7 +478,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_a .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -329,7 +500,11 @@ impl<E: GenericExecutionEngine> TestRig<E> { .notify_new_payload(&second_payload) .await .unwrap(); - assert_eq!(status, PayloadStatus::Accepted); + // TODO: we should remove the `Accepted` status here once Geth fixes it + assert!(matches!( + status, + PayloadStatus::Syncing | PayloadStatus::Accepted + )); /* * Execution Engine B: @@ -343,7 +518,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self .ee_b .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Syncing); @@ -389,7 +570,13 @@ impl<E: GenericExecutionEngine> TestRig<E> { let status = self 
.ee_b .execution_layer - .notify_forkchoice_updated(head_block_hash, finalized_block_hash, slot, head_block_root) + .notify_forkchoice_updated( + head_block_hash, + justified_block_hash, + finalized_block_hash, + slot, + head_block_root, + ) .await .unwrap(); assert_eq!(status, PayloadStatus::Valid); @@ -400,7 +587,7 @@ impl<E: GenericExecutionEngine> TestRig<E> { /// /// Panic if payload reconstruction fails. async fn check_payload_reconstruction<E: GenericExecutionEngine>( - ee: &ExecutionPair<E>, + ee: &ExecutionPair<E, MainnetEthSpec>, payload: &ExecutionPayload<MainnetEthSpec>, ) { let reconstructed = ee diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs new file mode 100644 index 0000000000..144946682b --- /dev/null +++ b/testing/execution_engine_integration/src/transactions.rs @@ -0,0 +1,87 @@ +use deposit_contract::{encode_eth1_tx_data, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS}; +use ethers_core::types::{ + transaction::{eip2718::TypedTransaction, eip2930::AccessList}, + Address, Bytes, Eip1559TransactionRequest, TransactionRequest, +}; +use types::{DepositData, EthSpec, Hash256, Keypair, Signature}; + +/// Hardcoded deposit contract address based on sender address and nonce +pub const DEPOSIT_CONTRACT_ADDRESS: &str = "64f43BEc7F86526686C931d65362bB8698872F90"; + +#[derive(Debug)] +pub enum Transaction { + Transfer(Address, Address), + TransferLegacy(Address, Address), + TransferAccessList(Address, Address), + DeployDepositContract(Address), + DepositDepositContract { + sender: Address, + deposit_contract_address: Address, + }, +} + +/// Get a list of transactions to publish to the execution layer. 
+pub fn transactions<E: EthSpec>(account1: Address, account2: Address) -> Vec<TypedTransaction> { + vec![ + Transaction::Transfer(account1, account2).transaction::<E>(), + Transaction::TransferLegacy(account1, account2).transaction::<E>(), + Transaction::TransferAccessList(account1, account2).transaction::<E>(), + Transaction::DeployDepositContract(account1).transaction::<E>(), + Transaction::DepositDepositContract { + sender: account1, + deposit_contract_address: ethers_core::types::Address::from_slice( + &hex::decode(&DEPOSIT_CONTRACT_ADDRESS).unwrap(), + ), + } + .transaction::<E>(), + ] +} + +impl Transaction { + pub fn transaction<E: EthSpec>(&self) -> TypedTransaction { + match &self { + Self::TransferLegacy(from, to) => TransactionRequest::new() + .from(*from) + .to(*to) + .value(1) + .into(), + Self::Transfer(from, to) => Eip1559TransactionRequest::new() + .from(*from) + .to(*to) + .value(1) + .into(), + Self::TransferAccessList(from, to) => TransactionRequest::new() + .from(*from) + .to(*to) + .value(1) + .with_access_list(AccessList::default()) + .into(), + Self::DeployDepositContract(addr) => TransactionRequest::new() + .from(*addr) + .data(Bytes::from(BYTECODE.to_vec())) + .gas(CONTRACT_DEPLOY_GAS) + .into(), + Self::DepositDepositContract { + sender, + deposit_contract_address, + } => { + let keypair = Keypair::random(); + + let mut deposit = DepositData { + pubkey: keypair.pk.into(), + withdrawal_credentials: Hash256::zero(), + amount: 32_000_000_000, + signature: Signature::empty().into(), + }; + + deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); + TransactionRequest::new() + .from(*sender) + .to(*deposit_contract_address) + .data(Bytes::from(encode_eth1_tx_data(&deposit).unwrap())) + .gas(DEPOSIT_GAS) + .into() + } + } + } +} diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 8e4b8595df..2c9bd5939f 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ 
-13,3 +13,4 @@ eth2 = { path = "../../common/eth2" } validator_client = { path = "../../validator_client" } validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } sensitive_url = { path = "../../common/sensitive_url" } +execution_layer = { path = "../../beacon_node/execution_layer" } \ No newline at end of file diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index acf9bb9e68..0933bff4c6 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -17,6 +17,9 @@ use validator_dir::insecure_keys::build_deterministic_validator_dirs; pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient}; pub use environment; pub use eth2; +pub use execution_layer::test_utils::{ + Config as MockServerConfig, MockExecutionConfig, MockServer, +}; pub use validator_client::Config as ValidatorConfig; /// The global timeout for HTTP requests to the beacon node. @@ -211,3 +214,29 @@ impl<E: EthSpec> LocalValidatorClient<E> { }) } } + +/// Provides an execution engine api server that is running in the current process on a given tokio executor (it +/// is _local_ to this process). +/// +/// Intended for use in testing and simulation. Not for production. 
+pub struct LocalExecutionNode<E: EthSpec> { + pub server: MockServer<E>, + pub datadir: TempDir, +} + +impl<E: EthSpec> LocalExecutionNode<E> { + pub fn new(context: RuntimeContext<E>, config: MockExecutionConfig) -> Self { + let datadir = TempBuilder::new() + .prefix("lighthouse_node_test_rig_el") + .tempdir() + .expect("should create temp directory for client datadir"); + let jwt_file_path = datadir.path().join("jwt.hex"); + if let Err(e) = std::fs::write(&jwt_file_path, config.jwt_key.hex_string()) { + panic!("Failed to write jwt file {}", e); + } + Self { + server: MockServer::new_with_config(&context.executor.handle().unwrap(), config), + datadir, + } + } +} diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 6770508435..a01c133fd9 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -9,6 +9,7 @@ edition = "2021" [dependencies] node_test_rig = { path = "../node_test_rig" } eth1 = {path = "../../beacon_node/eth1"} +execution_layer = {path = "../../beacon_node/execution_layer"} types = { path = "../../consensus/types" } parking_lot = "0.12.0" futures = "0.3.7" diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 7ff387b9c6..02f4f76d51 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,7 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, Slot, Unsigned}; +use types::{Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, Unsigned}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. 
@@ -149,19 +149,19 @@ pub async fn verify_fork_version<E: EthSpec>( network: LocalNetwork<E>, fork_epoch: Epoch, slot_duration: Duration, - altair_fork_version: [u8; 4], + fork_version: [u8; 4], ) -> Result<(), String> { epoch_delay(fork_epoch, slot_duration, E::slots_per_epoch()).await; for remote_node in network.remote_nodes()? { - let fork_version = remote_node + let remote_fork_version = remote_node .get_beacon_states_fork(StateId::Head) .await .map(|resp| resp.unwrap().data.current_version) .map_err(|e| format!("Failed to get fork from beacon node: {:?}", e))?; - if fork_version != altair_fork_version { + if fork_version != remote_fork_version { return Err(format!( "Fork version after FORK_EPOCH is incorrect, got: {:?}, expected: {:?}", - fork_version, altair_fork_version, + remote_fork_version, fork_version, )); } } @@ -207,3 +207,39 @@ pub async fn verify_full_sync_aggregates_up_to<E: EthSpec>( Ok(()) } + +/// Verify that the first merged PoS block got finalized. +pub async fn verify_transition_block_finalized<E: EthSpec>( + network: LocalNetwork<E>, + transition_epoch: Epoch, + slot_duration: Duration, + should_verify: bool, +) -> Result<(), String> { + if !should_verify { + return Ok(()); + } + epoch_delay(transition_epoch + 2, slot_duration, E::slots_per_epoch()).await; + let mut block_hashes = Vec::new(); + for remote_node in network.remote_nodes()?.iter() { + let execution_block_hash: ExecutionBlockHash = remote_node + .get_beacon_blocks::<E>(BlockId::Finalized) + .await + .map(|body| body.unwrap().data) + .map_err(|e| format!("Get state root via http failed: {:?}", e))? 
+ .message() + .execution_payload() + .map(|payload| payload.execution_payload.block_hash) + .map_err(|e| format!("Execution payload does not exist: {:?}", e))?; + block_hashes.push(execution_block_hash); + } + + let first = block_hashes[0]; + if first.into_root() != Hash256::zero() && block_hashes.iter().all(|&item| item == first) { + Ok(()) + } else { + Err(format!( + "Terminal block not finalized on all nodes Finalized block hashes:{:?}", + block_hashes + )) + } +} diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index 28f1a25627..f1196502fb 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -36,6 +36,11 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("3") .help("Speed up factor. Please use a divisor of 12.")) + .arg(Arg::with_name("post-merge") + .short("m") + .long("post-merge") + .takes_value(false) + .help("Simulate the merge transition")) .arg(Arg::with_name("continue_after_checks") .short("c") .long("continue_after_checks") diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 80fc755d52..613573cd0d 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -1,9 +1,10 @@ -use crate::local_network::INVALID_ADDRESS; +use crate::local_network::{EXECUTION_PORT, INVALID_ADDRESS, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; -use eth1::http::Eth1Id; -use eth1::{DEFAULT_CHAIN_ID, DEFAULT_NETWORK_ID}; +use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; use eth1_test_rig::GanacheEth1Instance; + +use execution_layer::http::deposit_methods::Eth1Id; use futures::prelude::*; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, @@ -17,8 +18,12 @@ use std::time::Duration; use tokio::time::sleep; use types::{Epoch, EthSpec, MinimalEthSpec}; -const FORK_EPOCH: u64 = 2; const END_EPOCH: u64 = 16; +const ALTAIR_FORK_EPOCH: u64 = 1; +const BELLATRIX_FORK_EPOCH: u64 = 
2; + +const SUGGESTED_FEE_RECIPIENT: [u8; 20] = + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); @@ -27,10 +32,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let speed_up_factor = value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default"); let continue_after_checks = matches.is_present("continue_after_checks"); + let post_merge_sim = matches.is_present("post-merge"); println!("Beacon Chain Simulator:"); println!(" nodes:{}", node_count); println!(" validators_per_node:{}", validators_per_node); + println!(" post merge simulation:{}", post_merge_sim); println!(" continue_after_checks:{}", continue_after_checks); // Generate the directories and keystores required for the validator clients. @@ -58,6 +65,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: log_level, logfile_debug_level: "debug", log_format, + log_color: false, max_log_size: 0, max_log_number: 0, compression: false, @@ -71,6 +79,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let total_validator_count = validators_per_node * node_count; let altair_fork_version = spec.altair_fork_version; + let bellatrix_fork_version = spec.bellatrix_fork_version; spec.seconds_per_slot /= speed_up_factor; spec.seconds_per_slot = max(1, spec.seconds_per_slot); @@ -79,8 +88,14 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { spec.min_genesis_time = 0; spec.min_genesis_active_validator_count = total_validator_count as u64; spec.seconds_per_eth1_block = eth1_block_time.as_secs(); - spec.altair_fork_epoch = Some(Epoch::new(FORK_EPOCH)); + spec.altair_fork_epoch = Some(Epoch::new(ALTAIR_FORK_EPOCH)); + // Set these parameters only if we are doing a merge simulation + if post_merge_sim { + spec.terminal_total_difficulty = 
TERMINAL_DIFFICULTY.into(); + spec.bellatrix_fork_epoch = Some(Epoch::new(BELLATRIX_FORK_EPOCH)); + } + let seconds_per_slot = spec.seconds_per_slot; let slot_duration = Duration::from_secs(spec.seconds_per_slot); let initial_validator_count = spec.min_genesis_active_validator_count as usize; let deposit_amount = env.eth2_config.spec.max_effective_balance; @@ -92,10 +107,8 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit * validators. */ - let ganache_eth1_instance = - GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into()).await?; + let ganache_eth1_instance = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; let deposit_contract = ganache_eth1_instance.deposit_contract; - let network_id = ganache_eth1_instance.ganache.network_id(); let chain_id = ganache_eth1_instance.ganache.chain_id(); let ganache = ganache_eth1_instance.ganache; let eth1_endpoint = SensitiveUrl::parse(ganache.endpoint().as_str()) @@ -124,7 +137,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let mut beacon_config = testing_client_config(); beacon_config.genesis = ClientGenesis::DepositContract; - beacon_config.eth1.endpoints = vec![eth1_endpoint]; + beacon_config.eth1.endpoints = Eth1Endpoint::NoAuth(vec![eth1_endpoint]); beacon_config.eth1.deposit_contract_address = deposit_contract_address; beacon_config.eth1.deposit_contract_deploy_block = 0; beacon_config.eth1.lowest_cached_block_number = 0; @@ -133,12 +146,24 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.dummy_eth1_backend = false; beacon_config.sync_eth1_chain = true; beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64; - beacon_config.eth1.network_id = Eth1Id::from(network_id); beacon_config.eth1.chain_id = Eth1Id::from(chain_id); beacon_config.network.target_peers = node_count - 1; beacon_config.network.enr_address 
= Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + if post_merge_sim { + let el_config = execution_layer::Config { + execution_endpoints: vec![SensitiveUrl::parse(&format!( + "http://localhost:{}", + EXECUTION_PORT + )) + .unwrap()], + ..Default::default() + }; + + beacon_config.execution_layer = Some(el_config); + } + /* * Create a new `LocalNetwork` with one beacon node. */ @@ -150,10 +175,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { for i in 0..node_count - 1 { let mut config = beacon_config.clone(); if i % 2 == 0 { - config.eth1.endpoints.insert( - 0, - SensitiveUrl::parse(INVALID_ADDRESS).expect("Unable to parse invalid address"), - ); + if let Eth1Endpoint::NoAuth(endpoints) = &mut config.eth1.endpoints { + endpoints.insert( + 0, + SensitiveUrl::parse(INVALID_ADDRESS) + .expect("Unable to parse invalid address"), + ) + } } network.add_beacon_node(config).await?; } @@ -167,9 +195,13 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let network_1 = network.clone(); executor.spawn( async move { + let mut validator_config = testing_validator_config(); + if post_merge_sim { + validator_config.fee_recipient = Some(SUGGESTED_FEE_RECIPIENT.into()); + } println!("Adding validator client {}", i); network_1 - .add_validator_client(testing_validator_config(), i, files, i % 2 == 0) + .add_validator_client(validator_config, i, files, i % 2 == 0) .await .expect("should add validator"); }, @@ -181,6 +213,21 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { println!("Duration to genesis: {}", duration_to_genesis.as_secs()); sleep(duration_to_genesis).await; + if post_merge_sim { + let executor = executor.clone(); + let network_2 = network.clone(); + executor.spawn( + async move { + println!("Mining pow blocks"); + let mut interval = tokio::time::interval(Duration::from_secs(seconds_per_slot)); + for i in 1..=TERMINAL_BLOCK + 1 { + interval.tick().await; + let _ = network_2.mine_pow_blocks(i); + } + }, + 
"pow_mining", + ); + } /* * Start the checks that ensure the network performs as expected. * @@ -189,7 +236,16 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * tests start at the right time. Whilst this is works well for now, it's subject to * breakage by changes to the VC. */ - let (finalization, block_prod, validator_count, onboarding, fork, sync_aggregate) = futures::join!( + + let ( + finalization, + block_prod, + validator_count, + onboarding, + fork, + sync_aggregate, + transition, + ) = futures::join!( // Check that the chain finalizes at the first given opportunity. checks::verify_first_finalization(network.clone(), slot_duration), // Check that a block is produced at every slot. @@ -211,21 +267,36 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { slot_duration, total_validator_count, ), - // Check that all nodes have transitioned to the new fork. + // Check that all nodes have transitioned to the required fork. checks::verify_fork_version( network.clone(), - Epoch::new(FORK_EPOCH), + if post_merge_sim { + Epoch::new(BELLATRIX_FORK_EPOCH) + } else { + Epoch::new(ALTAIR_FORK_EPOCH) + }, slot_duration, - altair_fork_version + if post_merge_sim { + bellatrix_fork_version + } else { + altair_fork_version + } ), // Check that all sync aggregates are full. checks::verify_full_sync_aggregates_up_to( network.clone(), // Start checking for sync_aggregates at `FORK_EPOCH + 1` to account for // inefficiencies in finding subnet peers at the `fork_slot`. - Epoch::new(FORK_EPOCH + 1).start_slot(MinimalEthSpec::slots_per_epoch()), + Epoch::new(ALTAIR_FORK_EPOCH + 1).start_slot(MinimalEthSpec::slots_per_epoch()), Epoch::new(END_EPOCH).start_slot(MinimalEthSpec::slots_per_epoch()), slot_duration, + ), + // Check that the transition block is finalized. 
+ checks::verify_transition_block_finalized( + network.clone(), + Epoch::new(TERMINAL_BLOCK / MinimalEthSpec::slots_per_epoch()), + slot_duration, + post_merge_sim ) ); @@ -235,6 +306,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { onboarding?; fork?; sync_aggregate?; + transition?; // The `final_future` either completes immediately or never completes, depending on the value // of `continue_after_checks`. diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 6cfc3e6db7..8df912ed16 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,7 +1,8 @@ use node_test_rig::{ environment::RuntimeContext, eth2::{types::StateId, BeaconNodeHttpClient}, - ClientConfig, LocalBeaconNode, LocalValidatorClient, ValidatorConfig, ValidatorFiles, + ClientConfig, LocalBeaconNode, LocalExecutionNode, LocalValidatorClient, MockExecutionConfig, + MockServerConfig, ValidatorConfig, ValidatorFiles, }; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; @@ -15,11 +16,17 @@ use types::{Epoch, EthSpec}; const BOOTNODE_PORT: u16 = 42424; pub const INVALID_ADDRESS: &str = "http://127.0.0.1:42423"; +pub const EXECUTION_PORT: u16 = 4000; + +pub const TERMINAL_DIFFICULTY: u64 = 6400; +pub const TERMINAL_BLOCK: u64 = 64; + /// Helper struct to reduce `Arc` usage. pub struct Inner<E: EthSpec> { pub context: RuntimeContext<E>, pub beacon_nodes: RwLock<Vec<LocalBeaconNode<E>>>, pub validator_clients: RwLock<Vec<LocalValidatorClient<E>>>, + pub execution_nodes: RwLock<Vec<LocalExecutionNode<E>>>, } /// Represents a set of interconnected `LocalBeaconNode` and `LocalValidatorClient`. @@ -46,7 +53,7 @@ impl<E: EthSpec> Deref for LocalNetwork<E> { } impl<E: EthSpec> LocalNetwork<E> { - /// Creates a new network with a single `BeaconNode`. + /// Creates a new network with a single `BeaconNode` and a connected `ExecutionNode`. 
pub async fn new( context: RuntimeContext<E>, mut beacon_config: ClientConfig, @@ -56,6 +63,30 @@ impl<E: EthSpec> LocalNetwork<E> { beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT); beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT); beacon_config.network.discv5_config.table_filter = |_| true; + + let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer { + let mock_execution_config = MockExecutionConfig { + server_config: MockServerConfig { + listen_port: EXECUTION_PORT, + ..Default::default() + }, + terminal_block: TERMINAL_BLOCK, + terminal_difficulty: TERMINAL_DIFFICULTY.into(), + ..Default::default() + }; + let execution_node = LocalExecutionNode::new( + context.service_context("boot_node_el".into()), + mock_execution_config, + ); + el_config.default_datadir = execution_node.datadir.path().to_path_buf(); + el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")]; + el_config.execution_endpoints = + vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()]; + vec![execution_node] + } else { + vec![] + }; + let beacon_node = LocalBeaconNode::production(context.service_context("boot_node".into()), beacon_config) .await?; @@ -63,6 +94,7 @@ impl<E: EthSpec> LocalNetwork<E> { inner: Arc::new(Inner { context, beacon_nodes: RwLock::new(vec![beacon_node]), + execution_nodes: RwLock::new(execution_node), validator_clients: RwLock::new(vec![]), }), }) @@ -87,6 +119,7 @@ impl<E: EthSpec> LocalNetwork<E> { /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. 
pub async fn add_beacon_node(&self, mut beacon_config: ClientConfig) -> Result<(), String> { let self_1 = self.clone(); + let count = self.beacon_node_count() as u16; println!("Adding beacon node.."); { let read_lock = self.beacon_nodes.read(); @@ -99,20 +132,38 @@ impl<E: EthSpec> LocalNetwork<E> { .enr() .expect("bootnode must have a network"), ); - let count = self.beacon_node_count() as u16; beacon_config.network.discovery_port = BOOTNODE_PORT + count; beacon_config.network.libp2p_port = BOOTNODE_PORT + count; beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT + count); beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT + count); beacon_config.network.discv5_config.table_filter = |_| true; } + if let Some(el_config) = &mut beacon_config.execution_layer { + let config = MockExecutionConfig { + server_config: MockServerConfig { + listen_port: EXECUTION_PORT + count, + ..Default::default() + }, + terminal_block: TERMINAL_BLOCK, + terminal_difficulty: TERMINAL_DIFFICULTY.into(), + ..Default::default() + }; + let execution_node = LocalExecutionNode::new( + self.context.service_context(format!("node_{}_el", count)), + config, + ); + el_config.default_datadir = execution_node.datadir.path().to_path_buf(); + el_config.secret_files = vec![execution_node.datadir.path().join("jwt.hex")]; + el_config.execution_endpoints = + vec![SensitiveUrl::parse(&execution_node.server.url()).unwrap()]; + self.execution_nodes.write().push(execution_node); + } // We create the beacon node without holding the lock, so that the lock isn't held // across the await. This is only correct if this function never runs in parallel // with itself (which at the time of writing, it does not). 
- let index = self_1.beacon_nodes.read().len(); let beacon_node = LocalBeaconNode::production( - self.context.service_context(format!("node_{}", index)), + self.context.service_context(format!("node_{}", count)), beacon_config, ) .await?; @@ -184,6 +235,16 @@ impl<E: EthSpec> LocalNetwork<E> { .map(|body| body.unwrap().data.finalized.epoch) } + pub fn mine_pow_blocks(&self, block_number: u64) -> Result<(), String> { + let execution_nodes = self.execution_nodes.read(); + for execution_node in execution_nodes.iter() { + let mut block_gen = execution_node.server.ctx.execution_block_generator.write(); + block_gen.insert_pow_block(block_number)?; + println!("Mined pow block {}", block_number); + } + Ok(()) + } + pub async fn duration_to_genesis(&self) -> Duration { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 5d2f0be72f..28b8719843 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -50,6 +50,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { debug_level: log_level, logfile_debug_level: "debug", log_format, + log_color: false, max_log_size: 0, max_log_number: 0, compression: false, diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 3bb460c9fe..07d774b8d4 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -51,6 +51,7 @@ fn syncing_sim( debug_level: log_level, logfile_debug_level: "debug", log_format, + log_color: false, max_log_size: 0, max_log_number: 0, compression: false, diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 4e93db3b32..6da9f2f4a6 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -12,3 +12,4 @@ types = { path = 
"../../consensus/types" } eth2_ssz = "0.4.1" beacon_chain = { path = "../../beacon_node/beacon_chain" } lazy_static = "1.4.0" +tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 88e6d41256..d581eba965 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -15,6 +15,7 @@ struct ExitTest { validator_index: u64, exit_epoch: Epoch, state_epoch: Epoch, + #[allow(clippy::type_complexity)] state_modifier: Box<dyn FnOnce(&mut BeaconState<E>)>, #[allow(clippy::type_complexity)] block_modifier: @@ -37,11 +38,12 @@ impl Default for ExitTest { } impl ExitTest { - fn block_and_pre_state(self) -> (SignedBeaconBlock<E>, BeaconState<E>) { + async fn block_and_pre_state(self) -> (SignedBeaconBlock<E>, BeaconState<E>) { let harness = get_harness::<E>( self.state_epoch.start_slot(E::slots_per_epoch()), VALIDATOR_COUNT, - ); + ) + .await; let mut state = harness.get_current_state(); (self.state_modifier)(&mut state); @@ -49,11 +51,12 @@ impl ExitTest { let validator_index = self.validator_index; let exit_epoch = self.exit_epoch; - let (signed_block, state) = - harness.make_block_with_modifier(state.clone(), state.slot() + 1, |block| { + let (signed_block, state) = harness + .make_block_with_modifier(state.clone(), state.slot() + 1, |block| { harness.add_voluntary_exit(block, validator_index, exit_epoch); block_modifier(&harness, block); - }); + }) + .await; (signed_block, state) } @@ -73,12 +76,12 @@ impl ExitTest { } #[cfg(all(test, not(debug_assertions)))] - fn run(self) -> BeaconState<E> { + async fn run(self) -> BeaconState<E> { let spec = &E::default_spec(); let expected = self.expected.clone(); assert_eq!(STATE_EPOCH, spec.shard_committee_period); - let (block, mut state) = self.block_and_pre_state(); + let (block, mut state) = self.block_and_pre_state().await; let result = Self::process(&block, &mut 
state); @@ -87,8 +90,8 @@ impl ExitTest { state } - fn test_vector(self, title: String) -> TestVector { - let (block, pre_state) = self.block_and_pre_state(); + async fn test_vector(self, title: String) -> TestVector { + let (block, pre_state) = self.block_and_pre_state().await; let mut post_state = pre_state.clone(); let (post_state, error) = match Self::process(&block, &mut post_state) { Ok(_) => (Some(post_state), None), @@ -344,14 +347,14 @@ mod custom_tests { ); } - #[test] - fn valid() { - let state = ExitTest::default().run(); + #[tokio::test] + async fn valid() { + let state = ExitTest::default().run().await; assert_exited(&state, VALIDATOR_INDEX as usize); } - #[test] - fn valid_three() { + #[tokio::test] + async fn valid_three() { let state = ExitTest { block_modifier: Box::new(|harness, block| { harness.add_voluntary_exit(block, 1, STATE_EPOCH); @@ -359,7 +362,8 @@ mod custom_tests { }), ..ExitTest::default() } - .run(); + .run() + .await; for i in &[VALIDATOR_INDEX, 1, 2] { assert_exited(&state, *i as usize); diff --git a/testing/state_transition_vectors/src/macros.rs b/testing/state_transition_vectors/src/macros.rs index 81f8171852..5dafbf549a 100644 --- a/testing/state_transition_vectors/src/macros.rs +++ b/testing/state_transition_vectors/src/macros.rs @@ -4,11 +4,11 @@ /// - `mod tests`: runs all the test vectors locally. macro_rules! vectors_and_tests { ($($name: ident, $test: expr),*) => { - pub fn vectors() -> Vec<TestVector> { + pub async fn vectors() -> Vec<TestVector> { let mut vec = vec![]; $( - vec.push($test.test_vector(stringify!($name).into())); + vec.push($test.test_vector(stringify!($name).into()).await); )* vec @@ -18,9 +18,9 @@ macro_rules! 
vectors_and_tests { mod tests { use super::*; $( - #[test] - fn $name() { - $test.run(); + #[tokio::test] + async fn $name() { + $test.run().await; } )* } diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index d66842e5a1..3e7c37af54 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -25,8 +25,9 @@ pub const BASE_VECTOR_DIR: &str = "vectors"; pub const SLOT_OFFSET: u64 = 1; /// Writes all known test vectors to `CARGO_MANIFEST_DIR/vectors`. -fn main() { - match write_all_vectors() { +#[tokio::main] +async fn main() { + match write_all_vectors().await { Ok(()) => exit(0), Err(e) => { eprintln!("Error: {}", e); @@ -49,7 +50,7 @@ lazy_static! { static ref KEYPAIRS: Vec<Keypair> = generate_deterministic_keypairs(VALIDATOR_COUNT); } -fn get_harness<E: EthSpec>( +async fn get_harness<E: EthSpec>( slot: Slot, validator_count: usize, ) -> BeaconChainHarness<EphemeralHarnessType<E>> { @@ -61,23 +62,25 @@ fn get_harness<E: EthSpec>( let skip_to_slot = slot - SLOT_OFFSET; if skip_to_slot > Slot::new(0) { let state = harness.get_current_state(); - harness.add_attested_blocks_at_slots( - state, - Hash256::zero(), - (skip_to_slot.as_u64()..slot.as_u64()) - .map(Slot::new) - .collect::<Vec<_>>() - .as_slice(), - (0..validator_count).collect::<Vec<_>>().as_slice(), - ); + harness + .add_attested_blocks_at_slots( + state, + Hash256::zero(), + (skip_to_slot.as_u64()..slot.as_u64()) + .map(Slot::new) + .collect::<Vec<_>>() + .as_slice(), + (0..validator_count).collect::<Vec<_>>().as_slice(), + ) + .await; } harness } /// Writes all vectors to file. -fn write_all_vectors() -> Result<(), String> { - write_vectors_to_file("exit", &exit::vectors()) +async fn write_all_vectors() -> Result<(), String> { + write_vectors_to_file("exit", &exit::vectors().await) } /// Writes a list of `vectors` to the `title` dir. 
diff --git a/testing/web3signer_tests/build.rs b/testing/web3signer_tests/build.rs index ac34b5197f..f62dff0b6f 100644 --- a/testing/web3signer_tests/build.rs +++ b/testing/web3signer_tests/build.rs @@ -29,6 +29,8 @@ pub async fn download_binary(dest_dir: PathBuf) { let version = if let Some(version) = FIXED_VERSION_STRING { version.to_string() + } else if let Ok(env_version) = env::var("LIGHTHOUSE_WEB3SIGNER_VERSION") { + env_version } else { // Get the latest release of the web3 signer repo. let latest_response: Value = client diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 800f988654..4f9a574f84 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -15,7 +15,7 @@ #[cfg(all(test, unix, not(debug_assertions)))] mod tests { use account_utils::validator_definitions::{ - SigningDefinition, ValidatorDefinition, ValidatorDefinitions, + SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, }; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; @@ -67,6 +67,7 @@ mod tests { impl SignedObject for SyncSelectionProof {} impl SignedObject for SyncCommitteeMessage {} impl SignedObject for SignedContributionAndProof<E> {} + impl SignedObject for SignedValidatorRegistrationData {} /// A file format used by Web3Signer to discover and unlock keystores. 
#[derive(Serialize)] @@ -301,6 +302,7 @@ mod tests { let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let config = validator_client::Config::default(); let validator_store = ValidatorStore::<_, E>::new( initialized_validators, @@ -309,6 +311,7 @@ mod tests { spec, None, slot_clock, + &config, executor, log.clone(), ); @@ -357,6 +360,8 @@ mod tests { voting_public_key: validator_pubkey.clone(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::default(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: signer_rig.keystore_path.clone(), @@ -373,14 +378,16 @@ mod tests { voting_public_key: validator_pubkey.clone(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::default(), - signing_definition: SigningDefinition::Web3Signer { + signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url: signer_rig.url.to_string(), root_certificate_path: Some(root_certificate_path()), request_timeout_ms: None, client_identity_path: Some(client_identity_path()), client_identity_password: Some(client_identity_password()), - }, + }), }; ValidatorStoreRig::new(vec![validator_definition], spec).await }; @@ -448,6 +455,16 @@ mod tests { } } + fn get_validator_registration(pubkey: PublicKeyBytes) -> ValidatorRegistrationData { + let fee_recipient = Address::repeat_byte(42); + ValidatorRegistrationData { + fee_recipient, + gas_limit: 30_000_000, + timestamp: 100, + pubkey, + } + } + /// Test all the "base" (phase 0) types. 
async fn test_base_types(network: &str, listen_port: u16) { let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); @@ -499,6 +516,17 @@ mod tests { .await .unwrap() }) + .await + .assert_signatures_match( + "validator_registration", + |pubkey, validator_store| async move { + let val_reg_data = get_validator_registration(pubkey); + validator_store + .sign_validator_registration_data(val_reg_data) + .await + .unwrap() + }, + ) .await; } @@ -575,6 +603,39 @@ mod tests { .unwrap() }, ) + .await + .assert_signatures_match( + "validator_registration", + |pubkey, validator_store| async move { + let val_reg_data = get_validator_registration(pubkey); + validator_store + .sign_validator_registration_data(val_reg_data) + .await + .unwrap() + }, + ) + .await; + } + + /// Test all the Merge types. + async fn test_merge_types(network: &str, listen_port: u16) { + let network_config = Eth2NetworkConfig::constant(network).unwrap().unwrap(); + let spec = &network_config.chain_spec::<E>().unwrap(); + let merge_fork_slot = spec + .bellatrix_fork_epoch + .unwrap() + .start_slot(E::slots_per_epoch()); + + TestingRig::new(network, spec.clone(), listen_port) + .await + .assert_signatures_match("beacon_block_merge", |pubkey, validator_store| async move { + let mut merge_block = BeaconBlockMerge::empty(spec); + merge_block.slot = merge_fork_slot; + validator_store + .sign_block(pubkey, BeaconBlock::Merge(merge_block), merge_fork_slot) + .await + .unwrap() + }) .await; } @@ -597,4 +658,19 @@ mod tests { async fn prater_altair_types() { test_altair_types("prater", 4247).await } + + #[tokio::test] + async fn ropsten_base_types() { + test_base_types("ropsten", 4250).await + } + + #[tokio::test] + async fn ropsten_altair_types() { + test_altair_types("ropsten", 4251).await + } + + #[tokio::test] + async fn ropsten_merge_types() { + test_merge_types("ropsten", 4252).await + } } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 
9833c046f5..8a3c8303a9 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -58,3 +58,4 @@ sensitive_url = { path = "../common/sensitive_url" } task_executor = { path = "../common/task_executor" } reqwest = { version = "0.11.0", features = ["json","stream"] } url = "2.2.2" +malloc_utils = { path = "../common/malloc_utils" } diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 95500fc947..a7118aa945 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -3,6 +3,7 @@ use crate::{ duties_service::{DutiesService, DutyAndProof}, http_metrics::metrics, validator_store::ValidatorStore, + OfflineOnFailure, }; use environment::RuntimeContext; use futures::future::join_all; @@ -337,17 +338,21 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { let attestation_data = self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS_HTTP_GET], - ); - beacon_node - .get_validator_attestation_data(slot, committee_index) - .await - .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) - .map(|result| result.data) - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::ATTESTATIONS_HTTP_GET], + ); + beacon_node + .get_validator_attestation_data(slot, committee_index) + .await + .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) + .map(|result| result.data) + }, + ) .await .map_err(|e| e.to_string())?; @@ -389,12 +394,13 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { ) .await { - Ok(()) => Some(attestation), + Ok(()) => Some((attestation, duty.validator_index)), Err(e) => { crit!( log, "Failed to sign attestation"; 
"error" => ?e, + "validator" => ?duty.pubkey, "committee_index" => committee_index, "slot" => slot.as_u64(), ); @@ -404,30 +410,35 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { }); // Execute all the futures in parallel, collecting any successful results. - let attestations = &join_all(signing_futures) + let (ref attestations, ref validator_indices): (Vec<_>, Vec<_>) = join_all(signing_futures) .await .into_iter() .flatten() - .collect::<Vec<Attestation<E>>>(); + .unzip(); // Post the attestations to the BN. match self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::ATTESTATIONS_HTTP_POST], - ); - beacon_node - .post_beacon_pool_attestations(attestations) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::ATTESTATIONS_HTTP_POST], + ); + beacon_node + .post_beacon_pool_attestations(attestations) + .await + }, + ) .await { Ok(()) => info!( log, "Successfully published attestations"; "count" => attestations.len(), + "validator_indices" => ?validator_indices, "head_block" => ?attestation_data.beacon_block_root, "committee_index" => attestation_data.index, "slot" => attestation_data.slot.as_u64(), @@ -468,21 +479,27 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { let aggregated_attestation = &self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::AGGREGATES_HTTP_GET], - ); - beacon_node - .get_validator_aggregate_attestation( - attestation_data.slot, - attestation_data.tree_hash_root(), - ) - .await - .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))? 
- .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data)) - .map(|result| result.data) - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::AGGREGATES_HTTP_GET], + ); + beacon_node + .get_validator_aggregate_attestation( + attestation_data.slot, + attestation_data.tree_hash_root(), + ) + .await + .map_err(|e| { + format!("Failed to produce an aggregate attestation: {:?}", e) + })? + .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data)) + .map(|result| result.data) + }, + ) .await .map_err(|e| e.to_string())?; @@ -533,15 +550,19 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { let signed_aggregate_and_proofs_slice = signed_aggregate_and_proofs.as_slice(); match self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::ATTESTATION_SERVICE_TIMES, - &[metrics::AGGREGATES_HTTP_POST], - ); - beacon_node - .post_validator_aggregate_and_proof(signed_aggregate_and_proofs_slice) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::ATTESTATION_SERVICE_TIMES, + &[metrics::AGGREGATES_HTTP_POST], + ); + beacon_node + .post_validator_aggregate_and_proof(signed_aggregate_and_proofs_slice) + .await + }, + ) .await { Ok(()) => { @@ -549,7 +570,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { let attestation = &signed_aggregate_and_proof.message.aggregate; info!( log, - "Successfully published attestations"; + "Successfully published attestation"; "aggregator" => signed_aggregate_and_proof.message.aggregator_index, "signatures" => attestation.aggregation_bits.num_set_bits(), "head_block" => format!("{:?}", attestation.data.beacon_block_root), @@ -566,6 +587,7 @@ impl<T: SlotClock 
+ 'static, E: EthSpec> AttestationService<T, E> { log, "Failed to publish attestation"; "error" => %e, + "aggregator" => signed_aggregate_and_proof.message.aggregator_index, "committee_index" => attestation.data.index, "slot" => attestation.data.slot.as_u64(), "type" => "aggregated", diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index d4f7c6c874..df6c949aef 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -7,7 +7,7 @@ use crate::http_metrics::metrics::{inc_counter_vec, ENDPOINT_ERRORS, ENDPOINT_RE use environment::RuntimeContext; use eth2::BeaconNodeHttpClient; use futures::future; -use slog::{debug, error, info, warn, Logger}; +use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; @@ -16,7 +16,7 @@ use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; use tokio::{sync::RwLock, time::sleep}; -use types::{ChainSpec, EthSpec}; +use types::{ChainSpec, Config, EthSpec}; /// The number of seconds *prior* to slot start that we will try and update the state of fallback /// nodes. @@ -70,6 +70,13 @@ pub enum RequireSynced { No, } +/// Indicates if a beacon node should be set to `Offline` if a request fails. +#[derive(PartialEq, Clone, Copy)] +pub enum OfflineOnFailure { + Yes, + No, +} + impl PartialEq<bool> for RequireSynced { fn eq(&self, other: &bool) -> bool { if *other { @@ -213,9 +220,9 @@ impl<E: EthSpec> CandidateBeaconNode<E> { /// Checks if the node has the correct specification. async fn is_compatible(&self, spec: &ChainSpec, log: &Logger) -> Result<(), CandidateError> { - let config_and_preset = self + let config = self .beacon_node - .get_config_spec() + .get_config_spec::<Config>() .await .map_err(|e| { error!( @@ -228,25 +235,15 @@ impl<E: EthSpec> CandidateBeaconNode<E> { })? 
.data; - let beacon_node_spec = - ChainSpec::from_config::<E>(&config_and_preset.config).ok_or_else(|| { - error!( - log, - "The minimal/mainnet spec type of the beacon node does not match the validator \ - client. See the --network command."; - "endpoint" => %self.beacon_node, - ); - CandidateError::Incompatible - })?; - - if !config_and_preset.extra_fields.is_empty() { - debug!( + let beacon_node_spec = ChainSpec::from_config::<E>(&config).ok_or_else(|| { + error!( log, - "Beacon spec includes unknown fields"; + "The minimal/mainnet spec type of the beacon node does not match the validator \ + client. See the --network command."; "endpoint" => %self.beacon_node, - "fields" => ?config_and_preset.extra_fields, ); - } + CandidateError::Incompatible + })?; if beacon_node_spec.genesis_fork_version != spec.genesis_fork_version { error!( @@ -397,6 +394,7 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> { pub async fn first_success<'a, F, O, Err, R>( &'a self, require_synced: RequireSynced, + offline_on_failure: OfflineOnFailure, func: F, ) -> Result<O, AllErrored<Err>> where @@ -425,7 +423,9 @@ impl<T: SlotClock, E: EthSpec> BeaconNodeFallback<T, E> { // There exists a race condition where the candidate may have been marked // as ready between the `func` call and now. We deem this an acceptable // inefficiency. 
- $candidate.set_offline().await; + if matches!(offline_on_failure, OfflineOnFailure::Yes) { + $candidate.set_offline().await; + } errors.push(($candidate.beacon_node.to_string(), Error::RequestFailed(e))); inc_counter_vec(&ENDPOINT_ERRORS, &[$candidate.beacon_node.as_ref()]); } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 2ba81eac7a..ac1ba11674 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -2,6 +2,7 @@ use crate::beacon_node_fallback::{AllErrored, Error as FallbackError}; use crate::{ beacon_node_fallback::{BeaconNodeFallback, RequireSynced}, graffiti_file::GraffitiFile, + OfflineOnFailure, }; use crate::{http_metrics::metrics, validator_store::ValidatorStore}; use environment::RuntimeContext; @@ -11,9 +12,7 @@ use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; use tokio::sync::mpsc; -use types::{ - BlindedPayload, BlockType, Epoch, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot, -}; +use types::{BlindedPayload, BlockType, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot}; #[derive(Debug)] pub enum BlockError { @@ -44,7 +43,6 @@ pub struct BlockServiceBuilder<T, E: EthSpec> { context: Option<RuntimeContext<E>>, graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, - private_tx_proposals: bool, } impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { @@ -56,7 +54,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { context: None, graffiti: None, graffiti_file: None, - private_tx_proposals: false, } } @@ -90,11 +87,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { self } - pub fn private_tx_proposals(mut self, private_tx_proposals: bool) -> Self { - self.private_tx_proposals = private_tx_proposals; - self - } - pub fn build(self) -> Result<BlockService<T, E>, String> { Ok(BlockService { inner: Arc::new(Inner { @@ -112,7 +104,6 @@ impl<T: SlotClock + 'static, E: 
EthSpec> BlockServiceBuilder<T, E> { .ok_or("Cannot build BlockService without runtime_context")?, graffiti: self.graffiti, graffiti_file: self.graffiti_file, - private_tx_proposals: self.private_tx_proposals, }), }) } @@ -126,7 +117,6 @@ pub struct Inner<T, E: EthSpec> { context: RuntimeContext<E>, graffiti: Option<Graffiti>, graffiti_file: Option<GraffitiFile>, - private_tx_proposals: bool, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. @@ -235,32 +225,29 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { ) } - let private_tx_proposals = self.private_tx_proposals; - let merge_slot = self - .context - .eth2_config - .spec - .bellatrix_fork_epoch - .unwrap_or_else(Epoch::max_value) - .start_slot(E::slots_per_epoch()); for validator_pubkey in proposers { + let builder_proposals = self + .validator_store + .get_builder_proposals(&validator_pubkey); let service = self.clone(); let log = log.clone(); self.inner.context.executor.spawn( async move { - let publish_result = if private_tx_proposals && slot >= merge_slot { + let publish_result = if builder_proposals { let mut result = service.clone() .publish_block::<BlindedPayload<E>>(slot, validator_pubkey) .await; match result.as_ref() { Err(BlockError::Recoverable(e)) => { - error!(log, "Error whilst producing a blinded block, attempting to publish full block"; "error" => ?e); + error!(log, "Error whilst producing a blinded block, attempting to \ + publish full block"; "error" => ?e); result = service .publish_block::<FullPayload<E>>(slot, validator_pubkey) .await; }, Err(BlockError::Irrecoverable(e)) => { - error!(log, "Error whilst producing a blinded block, cannot fallback because block was signed"; "error" => ?e); + error!(log, "Error whilst producing a blinded block, cannot fallback \ + because the block was signed"; "error" => ?e); }, _ => {}, }; @@ -328,59 +315,67 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { let self_ref = &self; let 
proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; + // Request block from first responsive beacon node. let block = self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); - let block = match Payload::block_type() { - BlockType::Full => { - beacon_node - .get_validator_blocks::<E, Payload>( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - BlockType::Blinded => { - beacon_node - .get_validator_blinded_blocks::<E, Payload>( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })? - .data - } - }; - drop(get_timer); + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let block = match Payload::block_type() { + BlockType::Full => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blocks::<E, Payload>( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? + .data + } + BlockType::Blinded => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blinded_blocks::<E, Payload>( + slot, + randao_reveal_ref, + graffiti.as_ref(), + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })? 
+ .data + } + }; - if proposer_index != Some(block.proposer_index()) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. Beacon chain re-orged" - .to_string(), - )); - } + if proposer_index != Some(block.proposer_index()) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. Beacon chain re-orged" + .to_string(), + )); + } - Ok::<_, BlockError>(block) - }) + Ok::<_, BlockError>(block) + }, + ) .await?; let signed_block = self_ref @@ -391,44 +386,56 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { // Publish block with first available beacon node. self.beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - - match Payload::block_type() { - BlockType::Full => beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })?, - BlockType::Blinded => beacon_node - .post_beacon_blinded_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })?, - } - - info!( - log, - "Successfully published block"; - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), - ); - Ok::<_, BlockError>(()) - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async { + match Payload::block_type() { + BlockType::Full => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + 
"Error from beacon node when publishing block: {:?}", + e + )) + })? + } + BlockType::Blinded => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blinded_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? + } + } + Ok::<_, BlockError>(()) + }, + ) .await?; + + info!( + log, + "Successfully published block"; + "block_type" => ?Payload::block_type(), + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); Ok(()) } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index d02e26ace0..5c7205a4ae 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -131,19 +131,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("suggested-fee-recipient") .long("suggested-fee-recipient") - .help("The fallback address provided to the BN if nothing suitable is found \ - in the validator definitions or fee recipient file.") + .help("Once the merge has happened, this address will receive transaction fees \ + from blocks proposed by this validator client. 
If a fee recipient is \ + configured in the validator definitions it takes priority over this value.") .value_name("FEE-RECIPIENT") .takes_value(true) ) - .arg( - Arg::with_name("suggested-fee-recipient-file") - .long("suggested-fee-recipient-file") - .help("The fallback address provided to the BN if nothing suitable is found \ - in the validator definitions.") - .value_name("FEE-RECIPIENT-FILE") - .takes_value(true) - ) /* REST API related arguments */ .arg( Arg::with_name("http") @@ -243,6 +236,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { and never provide an untrusted URL.") .takes_value(true), ) + .arg( + Arg::with_name("monitoring-endpoint-period") + .long("monitoring-endpoint-period") + .value_name("SECONDS") + .help("Defines how many seconds to wait between each message sent to \ + the monitoring-endpoint. Default: 60s") + .requires("monitoring-endpoint") + .takes_value(true), + ) .arg( Arg::with_name("enable-doppelganger-protection") .long("enable-doppelganger-protection") @@ -259,11 +261,42 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(false), ) .arg( - Arg::with_name("private-tx-proposals") - .long("private-tx-proposals") + Arg::with_name("builder-proposals") + .long("builder-proposals") + .alias("private-tx-proposals") .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ headers during proposals and will sign over headers. Useful for outsourcing \ execution payload construction during proposals.") .takes_value(false), + ).arg( + Arg::with_name("strict-fee-recipient") + .long("strict-fee-recipient") + .help("[DEPRECATED] If this flag is set, Lighthouse will refuse to sign any block whose \ + `fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. \ + This applies to both the normal block proposal flow, as well as block proposals \ + through the builder API. 
Proposals through the builder API are more likely to have a \ + discrepancy in `fee_recipient` so you should be aware of how your connected relay \ + sends proposer payments before using this flag. If this flag is used, a fee recipient \ + mismatch in the builder API flow will result in a fallback to the local execution engine \ + for payload construction, where a strict fee recipient check will still be applied.") + .takes_value(false), ) + .arg( + Arg::with_name("builder-registration-timestamp-override") + .long("builder-registration-timestamp-override") + .alias("builder-registration-timestamp-override") + .help("This flag takes a unix timestamp value that will be used to override the \ + timestamp used in the builder api registration") + .takes_value(true), + ) + .arg( + Arg::with_name("gas-limit") + .long("gas-limit") + .value_name("INTEGER") + .takes_value(true) + .help("The gas limit to be used in all builder proposals for all validators managed \ + by this validator client. Note this will not necessarily be used if the gas limit \ + set here moves too far from the previous block's gas limit. [default: 30,000,000]") + .requires("builder-proposals"), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 45e10e39e8..22472f7512 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,8 +1,7 @@ -use crate::fee_recipient_file::FeeRecipientFile; use crate::graffiti_file::GraffitiFile; use crate::{http_api, http_metrics}; use clap::ArgMatches; -use clap_utils::{parse_optional, parse_required}; +use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_optional, parse_required}; use directory::{ get_network_dir, DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, DEFAULT_VALIDATOR_DIR, @@ -44,8 +43,6 @@ pub struct Config { pub graffiti_file: Option<GraffitiFile>, /// Fallback fallback address. 
pub fee_recipient: Option<Address>, - /// Fee recipient file to load per validator suggested-fee-recipients. - pub fee_recipient_file: Option<FeeRecipientFile>, /// Configuration for the HTTP REST API. pub http_api: http_api::Config, /// Configuration for the HTTP REST API. @@ -55,7 +52,12 @@ pub struct Config { /// If true, enable functionality that monitors the network for attestations or proposals from /// any of the validators managed by this client before starting up. pub enable_doppelganger_protection: bool, - pub private_tx_proposals: bool, + /// Enable use of the blinded block endpoints during proposals. + pub builder_proposals: bool, + /// Overrides the timestamp field in builder api ValidatorRegistrationV1 + pub builder_registration_timestamp_override: Option<u64>, + /// Fallback gas limit. + pub gas_limit: Option<u64>, /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. pub beacon_nodes_tls_certs: Option<Vec<PathBuf>>, @@ -86,13 +88,14 @@ impl Default for Config { graffiti: None, graffiti_file: None, fee_recipient: None, - fee_recipient_file: None, http_api: <_>::default(), http_metrics: <_>::default(), monitoring_api: None, enable_doppelganger_protection: false, beacon_nodes_tls_certs: None, - private_tx_proposals: false, + builder_proposals: false, + builder_registration_timestamp_override: None, + gas_limit: None, } } } @@ -206,19 +209,6 @@ impl Config { } } - if let Some(fee_recipient_file_path) = cli_args.value_of("suggested-fee-recipient-file") { - let mut fee_recipient_file = FeeRecipientFile::new(fee_recipient_file_path.into()); - fee_recipient_file - .read_fee_recipient_file() - .map_err(|e| format!("Error reading suggested-fee-recipient file: {:?}", e))?; - config.fee_recipient_file = Some(fee_recipient_file); - info!( - log, - "Successfully loaded suggested-fee-recipient file"; - "path" => fee_recipient_file_path - ); - } - if let Some(input_fee_recipient) = 
parse_optional::<Address>(cli_args, "suggested-fee-recipient")? { @@ -293,13 +283,21 @@ impl Config { config.http_metrics.allow_origin = Some(allow_origin.to_string()); } + + if cli_args.is_present(DISABLE_MALLOC_TUNING_FLAG) { + config.http_metrics.allocator_metrics_enabled = false; + } + /* * Explorer metrics */ if let Some(monitoring_endpoint) = cli_args.value_of("monitoring-endpoint") { + let update_period_secs = + clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; config.monitoring_api = Some(monitoring_api::Config { db_path: None, freezer_db_path: None, + update_period_secs, monitoring_endpoint: monitoring_endpoint.to_string(), }); } @@ -308,8 +306,35 @@ impl Config { config.enable_doppelganger_protection = true; } - if cli_args.is_present("private-tx-proposals") { - config.private_tx_proposals = true; + if cli_args.is_present("builder-proposals") { + config.builder_proposals = true; + } + + config.gas_limit = cli_args + .value_of("gas-limit") + .map(|gas_limit| { + gas_limit + .parse::<u64>() + .map_err(|_| "gas-limit is not a valid u64.") + }) + .transpose()?; + + if let Some(registration_timestamp_override) = + cli_args.value_of("builder-registration-timestamp-override") + { + config.builder_registration_timestamp_override = Some( + registration_timestamp_override + .parse::<u64>() + .map_err(|_| "builder-registration-timestamp-override is not a valid u64.")?, + ); + } + + if cli_args.is_present("strict-fee-recipient") { + warn!( + log, + "The flag `--strict-fee-recipient` has been deprecated due to a bug causing \ + missed proposals. The flag will be ignored." 
+ ); } Ok(config) diff --git a/validator_client/src/doppelganger_service.rs b/validator_client/src/doppelganger_service.rs index 9e134f94da..e6934ed48b 100644 --- a/validator_client/src/doppelganger_service.rs +++ b/validator_client/src/doppelganger_service.rs @@ -31,6 +31,7 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; use crate::validator_store::ValidatorStore; +use crate::OfflineOnFailure; use environment::RuntimeContext; use eth2::types::LivenessResponseData; use parking_lot::RwLock; @@ -176,13 +177,17 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( } else { // Request the previous epoch liveness state from the beacon node. beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { - beacon_node - .post_lighthouse_liveness(validator_indices, previous_epoch) - .await - .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) - .map(|result| result.data) - }) + .first_success( + RequireSynced::Yes, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_lighthouse_liveness(validator_indices, previous_epoch) + .await + .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) + .map(|result| result.data) + }, + ) .await .unwrap_or_else(|e| { crit!( @@ -199,13 +204,17 @@ async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( // Request the current epoch liveness state from the beacon node. 
let current_epoch_responses = beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { - beacon_node - .post_lighthouse_liveness(validator_indices, current_epoch) - .await - .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) - .map(|result| result.data) - }) + .first_success( + RequireSynced::Yes, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_lighthouse_liveness(validator_indices, current_epoch) + .await + .map_err(|e| format!("Failed query for validator liveness: {:?}", e)) + .map(|result| result.data) + }, + ) .await .unwrap_or_else(|e| { crit!( diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index f8ca5a3d44..60b617e6c8 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -8,7 +8,7 @@ mod sync; -use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; +use crate::beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced}; use crate::{ block_service::BlockServiceNotification, http_metrics::metrics, @@ -382,27 +382,41 @@ async fn poll_validator_indices<T: SlotClock + 'static, E: EthSpec>( // Query the remote BN to resolve a pubkey to a validator index. 
let download_result = duties_service .beacon_nodes - .first_success(duties_service.require_synced, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::VALIDATOR_ID_HTTP_GET], - ); - beacon_node - .get_beacon_states_validator_id( - StateId::Head, - &ValidatorId::PublicKey(pubkey), - ) - .await - }) + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::VALIDATOR_ID_HTTP_GET], + ); + beacon_node + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(pubkey), + ) + .await + }, + ) .await; + let fee_recipient = duties_service + .validator_store + .get_fee_recipient(&pubkey) + .map(|fr| fr.to_string()) + .unwrap_or_else(|| { + "Fee recipient for validator not set in validator_definitions.yml \ + or provided with the `--suggested-fee-recipient` flag" + .to_string() + }); match download_result { Ok(Some(response)) => { info!( log, "Validator exists in beacon chain"; "pubkey" => ?pubkey, - "validator_index" => response.data.index + "validator_index" => response.data.index, + "fee_recipient" => fee_recipient ); duties_service .validator_store @@ -416,7 +430,8 @@ async fn poll_validator_indices<T: SlotClock + 'static, E: EthSpec>( debug!( log, "Validator without index"; - "pubkey" => ?pubkey + "pubkey" => ?pubkey, + "fee_recipient" => fee_recipient ) } // Don't exit early on an error, keep attempting to resolve other indices. 
@@ -426,6 +441,7 @@ async fn poll_validator_indices<T: SlotClock + 'static, E: EthSpec>( "Failed to resolve pubkey to index"; "error" => %e, "pubkey" => ?pubkey, + "fee_recipient" => fee_recipient ) } } @@ -559,15 +575,19 @@ async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>( let subscriptions_ref = &subscriptions; if let Err(e) = duties_service .beacon_nodes - .first_success(duties_service.require_synced, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::SUBSCRIPTIONS_HTTP_POST], - ); - beacon_node - .post_validator_beacon_committee_subscriptions(subscriptions_ref) - .await - }) + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::SUBSCRIPTIONS_HTTP_POST], + ); + beacon_node + .post_validator_beacon_committee_subscriptions(subscriptions_ref) + .await + }, + ) .await { error!( @@ -619,15 +639,19 @@ async fn poll_beacon_attesters_for_epoch<T: SlotClock + 'static, E: EthSpec>( let response = duties_service .beacon_nodes - .first_success(duties_service.require_synced, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::ATTESTER_DUTIES_HTTP_POST], - ); - beacon_node - .post_validator_duties_attester(epoch, local_indices) - .await - }) + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::ATTESTER_DUTIES_HTTP_POST], + ); + beacon_node + .post_validator_duties_attester(epoch, local_indices) + .await + }, + ) .await .map_err(|e| Error::FailedToDownloadAttesters(e.to_string()))?; @@ -779,15 +803,19 @@ async fn poll_beacon_proposers<T: SlotClock + 'static, E: EthSpec>( if !local_pubkeys.is_empty() { let download_result = duties_service .beacon_nodes - 
.first_success(duties_service.require_synced, |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::PROPOSER_DUTIES_HTTP_GET], - ); - beacon_node - .get_validator_duties_proposer(current_epoch) - .await - }) + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::PROPOSER_DUTIES_HTTP_GET], + ); + beacon_node + .get_validator_duties_proposer(current_epoch) + .await + }, + ) .await; match download_result { diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index 9857561c48..b9d4d70306 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -1,3 +1,4 @@ +use crate::beacon_node_fallback::OfflineOnFailure; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, @@ -420,11 +421,15 @@ pub async fn poll_sync_committee_duties_for_period<T: SlotClock + 'static, E: Et let duties_response = duties_service .beacon_nodes - .first_success(duties_service.require_synced, |beacon_node| async move { - beacon_node - .post_validator_duties_sync(period_start_epoch, local_indices) - .await - }) + .first_success( + duties_service.require_synced, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_validator_duties_sync(period_start_epoch, local_indices) + .await + }, + ) .await; let duties = match duties_response { diff --git a/validator_client/src/fee_recipient_file.rs b/validator_client/src/fee_recipient_file.rs deleted file mode 100644 index 637ca6d3d5..0000000000 --- a/validator_client/src/fee_recipient_file.rs +++ /dev/null @@ -1,184 +0,0 @@ -use serde_derive::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::fs::File; -use std::io::{prelude::*, BufReader}; -use std::path::PathBuf; -use std::str::FromStr; - 
-use bls::PublicKeyBytes; -use types::Address; - -#[derive(Debug)] -#[allow(clippy::enum_variant_names)] -pub enum Error { - InvalidFile(std::io::Error), - InvalidLine(String), - InvalidPublicKey(String), - InvalidFeeRecipient(String), -} - -/// Struct to load validator fee-recipients from file. -/// The fee-recipient file is expected to have the following structure -/// -/// default: 0x00000000219ab540356cbb839cbe05303d7705fa -/// public_key1: fee-recipient1 -/// public_key2: fee-recipient2 -/// ... -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct FeeRecipientFile { - fee_recipient_path: PathBuf, - fee_recipients: HashMap<PublicKeyBytes, Address>, - default: Option<Address>, -} - -impl FeeRecipientFile { - pub fn new(fee_recipient_path: PathBuf) -> Self { - Self { - fee_recipient_path, - fee_recipients: HashMap::new(), - default: None, - } - } - - /// Returns the fee-recipient corresponding to the given public key if present, else returns the - /// default fee-recipient. - /// - /// Returns an error if loading from the fee-recipient file fails. - pub fn get_fee_recipient(&self, public_key: &PublicKeyBytes) -> Result<Option<Address>, Error> { - Ok(self - .fee_recipients - .get(public_key) - .copied() - .or(self.default)) - } - - /// Loads the fee-recipient file and populates the default fee-recipient and `fee_recipients` hashmap. - /// Returns the fee-recipient corresponding to the given public key if present, else returns the - /// default fee-recipient. - /// - /// Returns an error if loading from the fee-recipient file fails. - pub fn load_fee_recipient( - &mut self, - public_key: &PublicKeyBytes, - ) -> Result<Option<Address>, Error> { - self.read_fee_recipient_file()?; - Ok(self - .fee_recipients - .get(public_key) - .copied() - .or(self.default)) - } - - /// Reads from a fee-recipient file with the specified format and populates the default value - /// and the hashmap. 
- /// - /// Returns an error if the file does not exist, or if the format is invalid. - pub fn read_fee_recipient_file(&mut self) -> Result<(), Error> { - let file = File::open(self.fee_recipient_path.as_path()).map_err(Error::InvalidFile)?; - let reader = BufReader::new(file); - - let lines = reader.lines(); - - self.default = None; - self.fee_recipients.clear(); - - for line in lines { - let line = line.map_err(|e| Error::InvalidLine(e.to_string()))?; - let (pk_opt, fee_recipient) = read_line(&line)?; - match pk_opt { - Some(pk) => { - self.fee_recipients.insert(pk, fee_recipient); - } - None => self.default = Some(fee_recipient), - } - } - Ok(()) - } -} - -/// Parses a line from the fee-recipient file. -/// -/// `Ok((None, fee_recipient))` represents the fee-recipient for the default key. -/// `Ok((Some(pk), fee_recipient))` represents fee-recipient for the public key `pk`. -/// Returns an error if the line is in the wrong format or does not contain a valid public key or fee-recipient. 
-fn read_line(line: &str) -> Result<(Option<PublicKeyBytes>, Address), Error> { - if let Some(i) = line.find(':') { - let (key, value) = line.split_at(i); - // Note: `value.len() >=1` so `value[1..]` is safe - let fee_recipient = Address::from_str(value[1..].trim()) - .map_err(|e| Error::InvalidFeeRecipient(e.to_string()))?; - if key == "default" { - Ok((None, fee_recipient)) - } else { - let pk = PublicKeyBytes::from_str(key).map_err(Error::InvalidPublicKey)?; - Ok((Some(pk), fee_recipient)) - } - } else { - Err(Error::InvalidLine(format!("Missing delimiter: {}", line))) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use bls::Keypair; - use std::io::LineWriter; - use tempfile::TempDir; - - const DEFAULT_FEE_RECIPIENT: &str = "0x00000000219ab540356cbb839cbe05303d7705fa"; - const CUSTOM_FEE_RECIPIENT1: &str = "0x4242424242424242424242424242424242424242"; - const CUSTOM_FEE_RECIPIENT2: &str = "0x0000000000000000000000000000000000000001"; - const PK1: &str = "0x800012708dc03f611751aad7a43a082142832b5c1aceed07ff9b543cf836381861352aa923c70eeb02018b638aa306aa"; - const PK2: &str = "0x80001866ce324de7d80ec73be15e2d064dcf121adf1b34a0d679f2b9ecbab40ce021e03bb877e1a2fe72eaaf475e6e21"; - - // Create a fee-recipient file in the required format and return a path to the file. 
- fn create_fee_recipient_file() -> PathBuf { - let temp = TempDir::new().unwrap(); - let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); - let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); - - let file_name = temp.into_path().join("fee_recipient.txt"); - - let file = File::create(&file_name).unwrap(); - let mut fee_recipient_file = LineWriter::new(file); - fee_recipient_file - .write_all(format!("default: {}\n", DEFAULT_FEE_RECIPIENT).as_bytes()) - .unwrap(); - fee_recipient_file - .write_all(format!("{}: {}\n", pk1.as_hex_string(), CUSTOM_FEE_RECIPIENT1).as_bytes()) - .unwrap(); - fee_recipient_file - .write_all(format!("{}: {}\n", pk2.as_hex_string(), CUSTOM_FEE_RECIPIENT2).as_bytes()) - .unwrap(); - fee_recipient_file.flush().unwrap(); - file_name - } - - #[test] - fn test_load_fee_recipient() { - let fee_recipient_file_path = create_fee_recipient_file(); - let mut gf = FeeRecipientFile::new(fee_recipient_file_path); - - let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); - let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); - - // Read once - gf.read_fee_recipient_file().unwrap(); - - assert_eq!( - gf.load_fee_recipient(&pk1).unwrap().unwrap(), - Address::from_str(CUSTOM_FEE_RECIPIENT1).unwrap() - ); - assert_eq!( - gf.load_fee_recipient(&pk2).unwrap().unwrap(), - Address::from_str(CUSTOM_FEE_RECIPIENT2).unwrap() - ); - - // Random pk should return the default fee-recipient - let random_pk = Keypair::random().pk.compress(); - assert_eq!( - gf.load_fee_recipient(&random_pk).unwrap().unwrap(), - Address::from_str(DEFAULT_FEE_RECIPIENT).unwrap() - ); - } -} diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index db59c25f75..a32ccce627 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -140,6 +140,8 @@ pub 
async fn create_validators_mnemonic<P: AsRef<Path>, T: 'static + SlotClock, request.enable, request.graffiti.clone(), request.suggested_fee_recipient, + request.gas_limit, + request.builder_proposals, ) .await .map_err(|e| { @@ -154,6 +156,8 @@ pub async fn create_validators_mnemonic<P: AsRef<Path>, T: 'static + SlotClock, description: request.description.clone(), graffiti: request.graffiti.clone(), suggested_fee_recipient: request.suggested_fee_recipient, + gas_limit: request.gas_limit, + builder_proposals: request.builder_proposals, voting_pubkey, eth1_deposit_tx_data: eth2_serde_utils::hex::encode(ð1_deposit_data.rlp), deposit_gwei: request.deposit_gwei, diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index f88aacfca8..b886f60435 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -40,7 +40,7 @@ pub fn list<T: SlotClock + 'static, E: EthSpec>( SigningMethod::LocalKeystore { ref voting_keystore, .. - } => (voting_keystore.path(), None), + } => (voting_keystore.path(), Some(false)), SigningMethod::Web3Signer { .. 
} => (None, Some(true)), }); @@ -205,6 +205,8 @@ fn import_single_keystore<T: SlotClock + 'static, E: EthSpec>( true, None, None, + None, + None, )) .map_err(|e| format!("failed to initialize validator: {:?}", e))?; diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 9ee983a35a..e9c7bf69d4 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -7,12 +7,13 @@ mod tests; use crate::ValidatorStore; use account_utils::{ mnemonic_from_phrase, - validator_definitions::{SigningDefinition, ValidatorDefinition}, + validator_definitions::{SigningDefinition, ValidatorDefinition, Web3SignerDefinition}, }; +pub use api_secret::ApiSecret; use create_validator::{create_validators_mnemonic, create_validators_web3signer}; use eth2::lighthouse_vc::{ - std_types::AuthResponse, - types::{self as api_types, PublicKey, PublicKeyBytes}, + std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, + types::{self as api_types, GenericResponse, PublicKey, PublicKeyBytes}, }; use lighthouse_version::version_with_platform; use serde::{Deserialize, Serialize}; @@ -35,8 +36,6 @@ use warp::{ Filter, }; -pub use api_secret::ApiSecret; - #[derive(Debug)] pub enum Error { Warp(warp::Error), @@ -218,8 +217,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .and(signer.clone()) .and_then(|spec: Arc<_>, signer| { blocking_signed_json_task(signer, move || { - let mut config = ConfigAndPreset::from_chain_spec::<E>(&spec); - config.make_backwards_compat(&spec); + let config = ConfigAndPreset::from_chain_spec::<E>(&spec, None); Ok(api_types::GenericResponse::from(config)) }) }); @@ -414,6 +412,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( let voting_password = body.password.clone(); let graffiti = body.graffiti.clone(); let suggested_fee_recipient = body.suggested_fee_recipient; + let gas_limit = body.gas_limit; + let builder_proposals = body.builder_proposals; let validator_def = 
{ if let Some(handle) = task_executor.handle() { @@ -424,6 +424,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( body.enable, graffiti, suggested_fee_recipient, + gas_limit, + builder_proposals, )) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -470,14 +472,19 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( voting_public_key: web3signer.voting_public_key, graffiti: web3signer.graffiti, suggested_fee_recipient: web3signer.suggested_fee_recipient, + gas_limit: web3signer.gas_limit, + builder_proposals: web3signer.builder_proposals, description: web3signer.description, - signing_definition: SigningDefinition::Web3Signer { - url: web3signer.url, - root_certificate_path: web3signer.root_certificate_path, - request_timeout_ms: web3signer.request_timeout_ms, - client_identity_path: web3signer.client_identity_path, - client_identity_password: web3signer.client_identity_password, - }, + signing_definition: SigningDefinition::Web3Signer( + Web3SignerDefinition { + url: web3signer.url, + root_certificate_path: web3signer.root_certificate_path, + request_timeout_ms: web3signer.request_timeout_ms, + client_identity_path: web3signer.client_identity_path, + client_identity_password: web3signer + .client_identity_password, + }, + ), }) .collect(); handle.block_on(create_validators_web3signer( @@ -513,18 +520,32 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( let initialized_validators_rw_lock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rw_lock.write(); - match initialized_validators.is_enabled(&validator_pubkey) { - None => Err(warp_utils::reject::custom_not_found(format!( + match ( + initialized_validators.is_enabled(&validator_pubkey), + initialized_validators.validator(&validator_pubkey.compress()), + ) { + (None, _) => Err(warp_utils::reject::custom_not_found(format!( "no validator for {:?}", validator_pubkey ))), - Some(enabled) if enabled == body.enabled => 
Ok(()), - Some(_) => { + (Some(is_enabled), Some(initialized_validator)) + if Some(is_enabled) == body.enabled + && initialized_validator.get_gas_limit() == body.gas_limit + && initialized_validator.get_builder_proposals() + == body.builder_proposals => + { + Ok(()) + } + (Some(_), _) => { if let Some(handle) = task_executor.handle() { handle .block_on( - initialized_validators - .set_validator_status(&validator_pubkey, body.enabled), + initialized_validators.set_validator_definition_fields( + &validator_pubkey, + body.enabled, + body.gas_limit, + body.builder_proposals, + ), ) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -562,6 +583,232 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( let std_keystores = eth_v1.and(warp::path("keystores")).and(warp::path::end()); let std_remotekeys = eth_v1.and(warp::path("remotekeys")).and(warp::path::end()); + // GET /eth/v1/validator/{pubkey}/feerecipient + let get_fee_recipient = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("feerecipient")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc<ValidatorStore<T, E>>, signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .get_fee_recipient(&PublicKeyBytes::from(&validator_pubkey)) + .map(|fee_recipient| { + GenericResponse::from(GetFeeRecipientResponse { + pubkey: PublicKeyBytes::from(validator_pubkey.clone()), + ethaddress: fee_recipient, + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_server_error( + "no fee recipient set".to_string(), + ) + }) + }) + }, + ); + + // POST /eth/v1/validator/{pubkey}/feerecipient + let 
post_fee_recipient = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("feerecipient")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, + request: api_types::UpdateFeeRecipientRequest, + validator_store: Arc<ValidatorStore<T, E>>, + signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .set_validator_fee_recipient(&validator_pubkey, request.ethaddress) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting fee recipient: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::ACCEPTED)); + + // DELETE /eth/v1/validator/{pubkey}/feerecipient + let delete_fee_recipient = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("feerecipient")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc<ValidatorStore<T, E>>, signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .delete_validator_fee_recipient(&validator_pubkey) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting fee recipient removal: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| 
warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + + // GET /eth/v1/validator/{pubkey}/gas_limit + let get_gas_limit = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("gas_limit")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc<ValidatorStore<T, E>>, signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + Ok(GenericResponse::from(GetGasLimitResponse { + pubkey: PublicKeyBytes::from(validator_pubkey.clone()), + gas_limit: validator_store + .get_gas_limit(&PublicKeyBytes::from(&validator_pubkey)), + })) + }) + }, + ); + + // POST /eth/v1/validator/{pubkey}/gas_limit + let post_gas_limit = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("gas_limit")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, + request: api_types::UpdateGasLimitRequest, + validator_store: Arc<ValidatorStore<T, E>>, + signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .set_validator_gas_limit(&validator_pubkey, request.gas_limit) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting gas limit: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, 
warp::http::StatusCode::ACCEPTED)); + + // DELETE /eth/v1/validator/{pubkey}/gas_limit + let delete_gas_limit = eth_v1 + .and(warp::path("validator")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path("gas_limit")) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: Arc<ValidatorStore<T, E>>, signer| { + blocking_signed_json_task(signer, move || { + if validator_store + .initialized_validators() + .read() + .is_enabled(&validator_pubkey) + .is_none() + { + return Err(warp_utils::reject::custom_not_found(format!( + "no validator found with pubkey {:?}", + validator_pubkey + ))); + } + validator_store + .initialized_validators() + .write() + .delete_validator_gas_limit(&validator_pubkey) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error persisting gas limit removal: {:?}", + e + )) + }) + }) + }, + ) + .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT)); + // GET /eth/v1/keystores let get_std_keystores = std_keystores .and(signer.clone()) @@ -647,6 +894,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .or(get_lighthouse_spec) .or(get_lighthouse_validators) .or(get_lighthouse_validators_pubkey) + .or(get_fee_recipient) + .or(get_gas_limit) .or(get_std_keystores) .or(get_std_remotekeys), ) @@ -655,11 +904,18 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>( .or(post_validators_keystore) .or(post_validators_mnemonic) .or(post_validators_web3signer) + .or(post_fee_recipient) + .or(post_gas_limit) .or(post_std_keystores) .or(post_std_remotekeys), )) .or(warp::patch().and(patch_validators)) - .or(warp::delete().and(delete_std_keystores.or(delete_std_remotekeys))), + .or(warp::delete().and( + delete_fee_recipient + .or(delete_gas_limit) + .or(delete_std_keystores) + .or(delete_std_remotekeys), + )), ) // The auth route is the only route that is allowed to be accessed without the API 
token. .or(warp::get().and(get_auth)) diff --git a/validator_client/src/http_api/remotekeys.rs b/validator_client/src/http_api/remotekeys.rs index 402396d4b4..991dfb8bf7 100644 --- a/validator_client/src/http_api/remotekeys.rs +++ b/validator_client/src/http_api/remotekeys.rs @@ -1,6 +1,8 @@ //! Implementation of the standard remotekey management API. use crate::{initialized_validators::Error, InitializedValidators, ValidatorStore}; -use account_utils::validator_definitions::{SigningDefinition, ValidatorDefinition}; +use account_utils::validator_definitions::{ + SigningDefinition, ValidatorDefinition, Web3SignerDefinition, +}; use eth2::lighthouse_vc::std_types::{ DeleteRemotekeyStatus, DeleteRemotekeysRequest, DeleteRemotekeysResponse, ImportRemotekeyStatus, ImportRemotekeysRequest, ImportRemotekeysResponse, @@ -31,11 +33,13 @@ pub fn list<T: SlotClock + 'static, E: EthSpec>( match &def.signing_definition { SigningDefinition::LocalKeystore { .. } => None, - SigningDefinition::Web3Signer { url, .. } => Some(SingleListRemotekeysResponse { - pubkey: validating_pubkey, - url: url.clone(), - readonly: false, - }), + SigningDefinition::Web3Signer(Web3SignerDefinition { url, .. 
}) => { + Some(SingleListRemotekeysResponse { + pubkey: validating_pubkey, + url: url.clone(), + readonly: false, + }) + } } }) .collect::<Vec<_>>(); @@ -119,14 +123,16 @@ fn import_single_remotekey<T: SlotClock + 'static, E: EthSpec>( voting_public_key: pubkey, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::from("Added by remotekey API"), - signing_definition: SigningDefinition::Web3Signer { + signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url, root_certificate_path: None, request_timeout_ms: None, client_identity_path: None, client_identity_password: None, - }, + }), }; handle .block_on(validator_store.add_validator(web3signer_validator)) diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 210555d9c0..b121dda5b1 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -36,6 +36,7 @@ use tokio::runtime::Runtime; use tokio::sync::oneshot; const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; +pub const TEST_DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); type E = MainnetEthSpec; @@ -82,6 +83,7 @@ impl ApiTester { let mut config = Config::default(); config.validator_dir = validator_dir.path().into(); config.secrets_dir = secrets_dir.path().into(); + config.fee_recipient = Some(TEST_DEFAULT_FEE_RECIPIENT); let spec = E::default_spec(); @@ -102,6 +104,7 @@ impl ApiTester { spec, Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock, + &config, executor.clone(), log.clone(), )); @@ -185,7 +188,7 @@ impl ApiTester { missing_token_client.send_authorization_header(false); match func(missing_token_client).await { Err(ApiError::ServerMessage(ApiErrorMessage { - code: 400, message, .. + code: 401, message, .. 
})) if message.contains("missing Authorization header") => (), Err(other) => panic!("expected missing header error, got {:?}", other), Ok(_) => panic!("expected missing header error, got Ok"), @@ -205,10 +208,13 @@ impl ApiTester { } pub async fn test_get_lighthouse_spec(self) -> Self { - let result = self.client.get_lighthouse_spec().await.unwrap().data; - - let mut expected = ConfigAndPreset::from_chain_spec::<E>(&E::default_spec()); - expected.make_backwards_compat(&E::default_spec()); + let result = self + .client + .get_lighthouse_spec::<ConfigAndPresetBellatrix>() + .await + .map(|res| ConfigAndPreset::Bellatrix(res.data)) + .unwrap(); + let expected = ConfigAndPreset::from_chain_spec::<E>(&E::default_spec(), None); assert_eq!(result, expected); @@ -268,6 +274,8 @@ impl ApiTester { description: format!("boi #{}", i), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, deposit_gwei: E::default_spec().max_effective_balance, }) .collect::<Vec<_>>(); @@ -399,6 +407,8 @@ impl ApiTester { keystore, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, }; self.client @@ -417,6 +427,8 @@ impl ApiTester { keystore, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, }; let response = self @@ -453,6 +465,8 @@ impl ApiTester { description: format!("{}", i), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: kp.pk, url: format!("http://signer_{}.com/", i), root_certificate_path: None, @@ -482,7 +496,7 @@ impl ApiTester { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, enabled) + .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None) .await .unwrap(); @@ -519,6 +533,56 @@ impl ApiTester { self } + + pub async fn set_gas_limit(self, index: usize, gas_limit: u64) 
-> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators(&validator.voting_pubkey, None, Some(gas_limit), None) + .await + .unwrap(); + + self + } + + pub async fn assert_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store.get_gas_limit(&validator.voting_pubkey), + gas_limit + ); + + self + } + + pub async fn set_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + Some(builder_proposals), + ) + .await + .unwrap(); + + self + } + + pub async fn assert_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store + .get_builder_proposals(&validator.voting_pubkey), + builder_proposals + ); + + self + } } struct HdValidatorScenario { @@ -562,7 +626,9 @@ fn routes_with_invalid_auth() { .await .test_with_invalid_auth(|client| async move { client.get_lighthouse_health().await }) .await - .test_with_invalid_auth(|client| async move { client.get_lighthouse_spec().await }) + .test_with_invalid_auth(|client| async move { + client.get_lighthouse_spec::<types::Config>().await + }) .await .test_with_invalid_auth( |client| async move { client.get_lighthouse_validators().await }, @@ -581,6 +647,8 @@ fn routes_with_invalid_auth() { description: <_>::default(), graffiti: <_>::default(), suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), deposit_gwei: <_>::default(), }]) .await @@ -610,13 +678,15 @@ fn routes_with_invalid_auth() { keystore, graffiti: <_>::default(), 
suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), }) .await }) .await .test_with_invalid_auth(|client| async move { client - .patch_lighthouse_validators(&PublicKeyBytes::empty(), false) + .patch_lighthouse_validators(&PublicKeyBytes::empty(), Some(false), None, None) .await }) .await @@ -733,6 +803,74 @@ fn validator_enabling() { }); } +#[test] +fn validator_gas_limit() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_gas_limit(0, 500) + .await + .assert_gas_limit(0, 500) + .await + // Update gas limit while validator is disabled. + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_gas_limit(0, 1000) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_gas_limit(0, 1000) + .await + }); +} + +#[test] +fn validator_builder_proposals() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_builder_proposals(0, true) + .await + // Test setting builder proposals while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_builder_proposals(0, false) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_builder_proposals(0, false) + 
.await + }); +} + #[test] fn keystore_validator_creation() { let runtime = build_runtime(); diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index a381378ffe..769d8a1d49 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -1,5 +1,8 @@ +use super::super::super::validator_store::DEFAULT_GAS_LIMIT; use super::*; use account_utils::random_password_string; +use bls::PublicKeyBytes; +use eth2::lighthouse_vc::types::UpdateFeeRecipientRequest; use eth2::lighthouse_vc::{ http_client::ValidatorClientHttpClient as HttpClient, std_types::{KeystoreJsonStr as Keystore, *}, @@ -9,6 +12,7 @@ use itertools::Itertools; use rand::{rngs::SmallRng, Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; use std::{collections::HashMap, path::Path}; +use types::Address; fn new_keystore(password: ZeroizeString) -> Keystore { let keypair = Keypair::random(); @@ -36,6 +40,8 @@ fn web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorReq description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: pubkey, url: web3_signer_url(), root_certificate_path: None, @@ -393,7 +399,7 @@ fn get_web3_signer_keystores() { .map(|local_keystore| SingleKeystoreResponse { validating_pubkey: keystore_pubkey(local_keystore), derivation_path: local_keystore.path(), - readonly: None, + readonly: Some(false), }) .chain(remote_vals.iter().map(|remote_val| SingleKeystoreResponse { validating_pubkey: remote_val.voting_public_key.compress(), @@ -462,7 +468,7 @@ fn import_and_delete_conflicting_web3_signer_keystores() { for pubkey in &pubkeys { tester .client - .patch_lighthouse_validators(pubkey, false) + .patch_lighthouse_validators(pubkey, Some(false), None, None) .await .unwrap(); } @@ -585,6 +591,360 @@ fn import_invalid_slashing_protection() { }) } 
+#[test] +fn check_get_set_fee_recipient() { + run_test(|tester: ApiTester| async move { + let _ = &tester; + let password = random_password_string(); + let keystores = (0..3) + .map(|_| new_keystore(password.clone())) + .collect::<Vec<_>>(); + let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::<Vec<_>>(); + + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_keystore_import_response(&import_res, all_imported(keystores.len())); + + // Check that GET lists all the imported keystores. + let get_res = tester.client.get_keystores().await.unwrap(); + check_keystore_get_response(&get_res, &keystores); + + // Before setting anything, every fee recipient should be set to TEST_DEFAULT_FEE_RECIPIENT + for pubkey in &all_pubkeys { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: TEST_DEFAULT_FEE_RECIPIENT, + } + ); + } + + use std::str::FromStr; + let fee_recipient_public_key_1 = + Address::from_str("0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b").unwrap(); + let fee_recipient_public_key_2 = + Address::from_str("0x0000000000000000000000000000000000000001").unwrap(); + let fee_recipient_override = + Address::from_str("0x0123456789abcdef0123456789abcdef01234567").unwrap(); + + // set the fee recipient for pubkey[1] using the API + tester + .client + .post_fee_recipient( + &all_pubkeys[1], + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient_public_key_1.clone(), + }, + ) + .await + .expect("should update fee recipient"); + // now everything but pubkey[1] should be TEST_DEFAULT_FEE_RECIPIENT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + 
.get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 1 { + fee_recipient_public_key_1.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + + // set the fee recipient for pubkey[2] using the API + tester + .client + .post_fee_recipient( + &all_pubkeys[2], + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient_public_key_2.clone(), + }, + ) + .await + .expect("should update fee recipient"); + // now everything but pubkey[1] & pubkey[2] should be fee_recipient_file_default + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 1 { + fee_recipient_public_key_1.clone() + } else if i == 2 { + fee_recipient_public_key_2.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + + // should be able to override previous fee_recipient + tester + .client + .post_fee_recipient( + &all_pubkeys[1], + &UpdateFeeRecipientRequest { + ethaddress: fee_recipient_override.clone(), + }, + ) + .await + .expect("should update fee recipient"); + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 1 { + fee_recipient_override.clone() + } else if i == 2 { + fee_recipient_public_key_2.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + + // delete fee recipient for pubkey[1] using the API + tester + .client + .delete_fee_recipient(&all_pubkeys[1]) + .await + .expect("should delete fee recipient"); + // now everything but pubkey[2] 
should be TEST_DEFAULT_FEE_RECIPIENT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_fee_recipient(pubkey) + .await + .expect("should get fee recipient"); + let expected = if i == 2 { + fee_recipient_public_key_2.clone() + } else { + TEST_DEFAULT_FEE_RECIPIENT + }; + assert_eq!( + get_res, + GetFeeRecipientResponse { + pubkey: pubkey.clone(), + ethaddress: expected, + } + ); + } + }) +} + +#[test] +fn check_get_set_gas_limit() { + run_test(|tester: ApiTester| async move { + let _ = &tester; + let password = random_password_string(); + let keystores = (0..3) + .map(|_| new_keystore(password.clone())) + .collect::<Vec<_>>(); + let all_pubkeys = keystores.iter().map(keystore_pubkey).collect::<Vec<_>>(); + + let import_res = tester + .client + .post_keystores(&ImportKeystoresRequest { + keystores: keystores.clone(), + passwords: vec![password.clone(); keystores.len()], + slashing_protection: None, + }) + .await + .unwrap(); + + // All keystores should be imported. + check_keystore_import_response(&import_res, all_imported(keystores.len())); + + // Check that GET lists all the imported keystores. 
+ let get_res = tester.client.get_keystores().await.unwrap(); + check_keystore_get_response(&get_res, &keystores); + + // Before setting anything, every gas limit should be set to DEFAULT_GAS_LIMIT + for pubkey in &all_pubkeys { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: DEFAULT_GAS_LIMIT, + } + ); + } + + let gas_limit_public_key_1 = 40_000_000; + let gas_limit_public_key_2 = 42; + let gas_limit_override = 100; + + // set the gas limit for pubkey[1] using the API + tester + .client + .post_gas_limit( + &all_pubkeys[1], + &UpdateGasLimitRequest { + gas_limit: gas_limit_public_key_1, + }, + ) + .await + .expect("should update gas limit"); + // now everything but pubkey[1] should be DEFAULT_GAS_LIMIT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + let expected = if i == 1 { + gas_limit_public_key_1.clone() + } else { + DEFAULT_GAS_LIMIT + }; + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: expected, + } + ); + } + + // set the gas limit for pubkey[2] using the API + tester + .client + .post_gas_limit( + &all_pubkeys[2], + &UpdateGasLimitRequest { + gas_limit: gas_limit_public_key_2, + }, + ) + .await + .expect("should update gas limit"); + // now everything but pubkey[1] & pubkey[2] should be DEFAULT_GAS_LIMIT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + let expected = if i == 1 { + gas_limit_public_key_1 + } else if i == 2 { + gas_limit_public_key_2 + } else { + DEFAULT_GAS_LIMIT + }; + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: expected, + } + ); + } + + // should be able to override previous gas_limit + tester + .client 
+ .post_gas_limit( + &all_pubkeys[1], + &UpdateGasLimitRequest { + gas_limit: gas_limit_override, + }, + ) + .await + .expect("should update gas limit"); + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + let expected = if i == 1 { + gas_limit_override + } else if i == 2 { + gas_limit_public_key_2 + } else { + DEFAULT_GAS_LIMIT + }; + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: expected, + } + ); + } + + // delete gas limit for pubkey[1] using the API + tester + .client + .delete_gas_limit(&all_pubkeys[1]) + .await + .expect("should delete gas limit"); + // now everything but pubkey[2] should be DEFAULT_GAS_LIMIT + for (i, pubkey) in all_pubkeys.iter().enumerate() { + let get_res = tester + .client + .get_gas_limit(pubkey) + .await + .expect("should get gas limit"); + let expected = if i == 2 { + gas_limit_public_key_2 + } else { + DEFAULT_GAS_LIMIT + }; + assert_eq!( + get_res, + GetGasLimitResponse { + pubkey: pubkey.clone(), + gas_limit: expected, + } + ); + } + }) +} + fn all_indices(count: usize) -> Vec<usize> { (0..count).collect() } @@ -1415,7 +1775,7 @@ fn import_same_local_and_remote_keys() { .map(|local_keystore| SingleKeystoreResponse { validating_pubkey: keystore_pubkey(local_keystore), derivation_path: local_keystore.path(), - readonly: None, + readonly: Some(false), }) .collect::<Vec<_>>(); for response in expected_responses { diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 29e52c3870..146d008a57 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -1,4 +1,5 @@ use super::Context; +use malloc_utils::scrape_allocator_metrics; use slot_clock::SlotClock; use std::time::{SystemTime, UNIX_EPOCH}; use types::EthSpec; @@ -10,7 +11,9 @@ pub const UNREGISTERED: &str = "unregistered"; pub const 
FULL_UPDATE: &str = "full_update"; pub const BEACON_BLOCK: &str = "beacon_block"; pub const BEACON_BLOCK_HTTP_GET: &str = "beacon_block_http_get"; +pub const BLINDED_BEACON_BLOCK_HTTP_GET: &str = "blinded_beacon_block_http_get"; pub const BEACON_BLOCK_HTTP_POST: &str = "beacon_block_http_post"; +pub const BLINDED_BEACON_BLOCK_HTTP_POST: &str = "blinded_beacon_block_http_post"; pub const ATTESTATIONS: &str = "attestations"; pub const ATTESTATIONS_HTTP_GET: &str = "attestations_http_get"; pub const ATTESTATIONS_HTTP_POST: &str = "attestations_http_post"; @@ -84,6 +87,11 @@ lazy_static::lazy_static! { "Total count of attempted SyncSelectionProof signings", &["status"] ); + pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec( + "builder_validator_registrations_total", + "Total count of ValidatorRegistrationData signings", + &["status"] + ); pub static ref DUTIES_SERVICE_TIMES: Result<HistogramVec> = try_create_histogram_vec( "vc_duties_service_task_times_seconds", "Duration to perform duties service tasks", @@ -131,6 +139,22 @@ lazy_static::lazy_static! 
{ &["endpoint"] ); + /* + * Beacon node availability metrics + */ + pub static ref AVAILABLE_BEACON_NODES_COUNT: Result<IntGauge> = try_create_int_gauge( + "vc_beacon_nodes_available_count", + "Number of available beacon nodes", + ); + pub static ref SYNCED_BEACON_NODES_COUNT: Result<IntGauge> = try_create_int_gauge( + "vc_beacon_nodes_synced_count", + "Number of synced beacon nodes", + ); + pub static ref TOTAL_BEACON_NODES_COUNT: Result<IntGauge> = try_create_int_gauge( + "vc_beacon_nodes_total_count", + "Total number of beacon nodes", + ); + pub static ref ETH2_FALLBACK_CONFIGURED: Result<IntGauge> = try_create_int_gauge( "sync_eth2_fallback_configured", "The number of configured eth2 fallbacks", @@ -190,6 +214,12 @@ pub fn gather_prometheus_metrics<T: EthSpec>( } } + // It's important to ensure these metrics are explicitly enabled in the case that users aren't + // using glibc and this function causes panics. + if ctx.config.allocator_metrics_enabled { + scrape_allocator_metrics(); + } + warp_utils::metrics::scrape_health_metrics(); encoder diff --git a/validator_client/src/http_metrics/mod.rs b/validator_client/src/http_metrics/mod.rs index 51a2d3f8a5..c30d603447 100644 --- a/validator_client/src/http_metrics/mod.rs +++ b/validator_client/src/http_metrics/mod.rs @@ -56,6 +56,7 @@ pub struct Config { pub listen_addr: IpAddr, pub listen_port: u16, pub allow_origin: Option<String>, + pub allocator_metrics_enabled: bool, } impl Default for Config { @@ -65,6 +66,7 @@ impl Default for Config { listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_port: 5064, allow_origin: None, + allocator_metrics_enabled: true, } } } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 0d5d4ad76e..8d9fbe281f 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -10,7 +10,8 @@ use crate::signing_method::SigningMethod; use account_utils::{ read_password, 
read_password_from_user, validator_definitions::{ - self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, CONFIG_FILENAME, + self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, + CONFIG_FILENAME, }, ZeroizeString, }; @@ -109,6 +110,8 @@ pub struct InitializedValidator { signing_method: Arc<SigningMethod>, graffiti: Option<Graffiti>, suggested_fee_recipient: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, /// The validators index in `state.validators`, to be updated by an external service. index: Option<u64>, } @@ -128,6 +131,22 @@ impl InitializedValidator { SigningMethod::Web3Signer { .. } => None, } } + + pub fn get_suggested_fee_recipient(&self) -> Option<Address> { + self.suggested_fee_recipient + } + + pub fn get_gas_limit(&self) -> Option<u64> { + self.gas_limit + } + + pub fn get_builder_proposals(&self) -> Option<bool> { + self.builder_proposals + } + + pub fn get_index(&self) -> Option<u64> { + self.index + } } fn open_keystore(path: &Path) -> Result<Keystore, Error> { @@ -155,6 +174,7 @@ impl InitializedValidator { def: ValidatorDefinition, key_cache: &mut KeyCache, key_stores: &mut HashMap<PathBuf, Keystore>, + web3_signer_client_map: &mut Option<HashMap<Web3SignerDefinition, Client>>, ) -> Result<Self, Error> { if !def.enabled { return Err(Error::UnableToInitializeDisabledValidator); @@ -239,46 +259,45 @@ impl InitializedValidator { voting_keypair: Arc::new(voting_keypair), } } - SigningDefinition::Web3Signer { - url, - root_certificate_path, - request_timeout_ms, - client_identity_path, - client_identity_password, - } => { - let signing_url = build_web3_signer_url(&url, &def.voting_public_key) + SigningDefinition::Web3Signer(web3_signer) => { + let signing_url = build_web3_signer_url(&web3_signer.url, &def.voting_public_key) .map_err(|e| Error::InvalidWeb3SignerUrl(e.to_string()))?; - let request_timeout = request_timeout_ms + + let request_timeout = web3_signer + 
.request_timeout_ms .map(Duration::from_millis) .unwrap_or(DEFAULT_REMOTE_SIGNER_REQUEST_TIMEOUT); - let builder = Client::builder().timeout(request_timeout); - - let builder = if let Some(path) = root_certificate_path { - let certificate = load_pem_certificate(path)?; - builder.add_root_certificate(certificate) - } else { - builder - }; - - let builder = if let Some(path) = client_identity_path { - let identity = load_pkcs12_identity( - path, - &client_identity_password - .ok_or(Error::MissingWeb3SignerClientIdentityPassword)?, - )?; - builder.identity(identity) - } else { - if client_identity_password.is_some() { - return Err(Error::MissingWeb3SignerClientIdentityCertificateFile); + // Check if a client has already been initialized for this remote signer url. + let http_client = if let Some(client_map) = web3_signer_client_map { + match client_map.get(&web3_signer) { + Some(client) => client.clone(), + None => { + let client = build_web3_signer_client( + web3_signer.root_certificate_path.clone(), + web3_signer.client_identity_path.clone(), + web3_signer.client_identity_password.clone(), + request_timeout, + )?; + client_map.insert(web3_signer, client.clone()); + client + } } - builder + } else { + // There are no clients in the map. 
+ let mut new_web3_signer_client_map: HashMap<Web3SignerDefinition, Client> = + HashMap::new(); + let client = build_web3_signer_client( + web3_signer.root_certificate_path.clone(), + web3_signer.client_identity_path.clone(), + web3_signer.client_identity_password.clone(), + request_timeout, + )?; + new_web3_signer_client_map.insert(web3_signer, client.clone()); + *web3_signer_client_map = Some(new_web3_signer_client_map); + client }; - let http_client = builder - .build() - .map_err(Error::UnableToBuildWeb3SignerClient)?; - SigningMethod::Web3Signer { signing_url, http_client, @@ -291,6 +310,8 @@ impl InitializedValidator { signing_method: Arc::new(signing_method), graffiti: def.graffiti.map(Into::into), suggested_fee_recipient: def.suggested_fee_recipient, + gas_limit: def.gas_limit, + builder_proposals: def.builder_proposals, index: None, }) } @@ -332,6 +353,39 @@ fn build_web3_signer_url(base_url: &str, voting_public_key: &PublicKey) -> Resul Url::parse(base_url)?.join(&format!("api/v1/eth2/sign/{}", voting_public_key)) } +fn build_web3_signer_client( + root_certificate_path: Option<PathBuf>, + client_identity_path: Option<PathBuf>, + client_identity_password: Option<String>, + request_timeout: Duration, +) -> Result<Client, Error> { + let builder = Client::builder().timeout(request_timeout); + + let builder = if let Some(path) = root_certificate_path { + let certificate = load_pem_certificate(path)?; + builder.add_root_certificate(certificate) + } else { + builder + }; + + let builder = if let Some(path) = client_identity_path { + let identity = load_pkcs12_identity( + path, + &client_identity_password.ok_or(Error::MissingWeb3SignerClientIdentityPassword)?, + )?; + builder.identity(identity) + } else { + if client_identity_password.is_some() { + return Err(Error::MissingWeb3SignerClientIdentityCertificateFile); + } + builder + }; + + builder + .build() + .map_err(Error::UnableToBuildWeb3SignerClient) +} + /// Try to unlock `keystore` at `keystore_path` by 
prompting the user via `stdin`. fn unlock_keystore_via_stdin_password( keystore: &Keystore, @@ -382,6 +436,8 @@ pub struct InitializedValidators { validators_dir: PathBuf, /// The canonical set of validators. validators: HashMap<PublicKeyBytes, InitializedValidator>, + /// The clients used for communications with a remote signer. + web3_signer_client_map: Option<HashMap<Web3SignerDefinition, Client>>, /// For logging via `slog`. log: Logger, } @@ -397,6 +453,7 @@ impl InitializedValidators { validators_dir, definitions, validators: HashMap::default(), + web3_signer_client_map: None, log, }; this.update_validators().await?; @@ -585,7 +642,28 @@ impl InitializedValidators { .and_then(|v| v.suggested_fee_recipient) } - /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled` values. + /// Returns the `gas_limit` for a given public key specified in the + /// `ValidatorDefinitions`. + pub fn gas_limit(&self, public_key: &PublicKeyBytes) -> Option<u64> { + self.validators.get(public_key).and_then(|v| v.gas_limit) + } + + /// Returns the `builder_proposals` for a given public key specified in the + /// `ValidatorDefinitions`. + pub fn builder_proposals(&self, public_key: &PublicKeyBytes) -> Option<bool> { + self.validators + .get(public_key) + .and_then(|v| v.builder_proposals) + } + + /// Returns an `Option` of a reference to an `InitializedValidator` for a given public key specified in the + /// `ValidatorDefinitions`. + pub fn validator(&self, public_key: &PublicKeyBytes) -> Option<&InitializedValidator> { + self.validators.get(public_key) + } + + /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`, and `builder_proposals` + /// values. /// /// ## Notes /// @@ -593,11 +671,17 @@ impl InitializedValidators { /// disk. A newly enabled validator will be added to `self.validators`, whilst a newly disabled /// validator will be removed from `self.validators`. 
/// + /// If a `gas_limit` is included in the call to this function, it will also be updated and saved + /// to disk. If `gas_limit` is `None` the `gas_limit` *will not* be unset in `ValidatorDefinition` + /// or `InitializedValidator`. The same logic applies to `builder_proposals`. + /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. - pub async fn set_validator_status( + pub async fn set_validator_definition_fields( &mut self, voting_public_key: &PublicKey, - enabled: bool, + enabled: Option<bool>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, ) -> Result<(), Error> { if let Some(def) = self .definitions @@ -605,11 +689,177 @@ impl InitializedValidators { .iter_mut() .find(|def| def.voting_public_key == *voting_public_key) { - def.enabled = enabled; + // Don't overwrite fields if they are not set in this request. + if let Some(enabled) = enabled { + def.enabled = enabled; + } + if let Some(gas_limit) = gas_limit { + def.gas_limit = Some(gas_limit); + } + if let Some(builder_proposals) = builder_proposals { + def.builder_proposals = Some(builder_proposals); + } } self.update_validators().await?; + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + // Don't overwrite fields if they are not set in this request. + if let Some(gas_limit) = gas_limit { + val.gas_limit = Some(gas_limit); + } + if let Some(builder_proposals) = builder_proposals { + val.builder_proposals = Some(builder_proposals); + } + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + + /// Sets the `InitializedValidator` and `ValidatorDefinition` `suggested_fee_recipient` values. + /// + /// ## Notes + /// + /// Setting a validator `fee_recipient` will cause `self.definitions` to be updated and saved to + /// disk. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. 
+ pub fn set_validator_fee_recipient( + &mut self, + voting_public_key: &PublicKey, + fee_recipient: Address, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.suggested_fee_recipient = Some(fee_recipient); + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.suggested_fee_recipient = Some(fee_recipient); + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + + /// Removes the `InitializedValidator` and `ValidatorDefinition` `suggested_fee_recipient` values. + /// + /// ## Notes + /// + /// Removing a validator `fee_recipient` will cause `self.definitions` to be updated and saved to + /// disk. The fee_recipient for the validator will then fall back to the process level default if + /// it is set. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + pub fn delete_validator_fee_recipient( + &mut self, + voting_public_key: &PublicKey, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.suggested_fee_recipient = None; + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.suggested_fee_recipient = None; + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + + /// Sets the `InitializedValidator` and `ValidatorDefinition` `gas_limit` values. + /// + /// ## Notes + /// + /// Setting a validator `gas_limit` will cause `self.definitions` to be updated and saved to + /// disk. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. 
+ pub fn set_validator_gas_limit( + &mut self, + voting_public_key: &PublicKey, + gas_limit: u64, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.gas_limit = Some(gas_limit); + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.gas_limit = Some(gas_limit); + } + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + + /// Removes the `InitializedValidator` and `ValidatorDefinition` `gas_limit` values. + /// + /// ## Notes + /// + /// Removing a validator `gas_limit` will cause `self.definitions` to be updated and saved to + /// disk. The gas_limit for the validator will then fall back to the process level default if + /// it is set. + /// + /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. + pub fn delete_validator_gas_limit( + &mut self, + voting_public_key: &PublicKey, + ) -> Result<(), Error> { + if let Some(def) = self + .definitions + .as_mut_slice() + .iter_mut() + .find(|def| def.voting_public_key == *voting_public_key) + { + def.gas_limit = None; + } + + if let Some(val) = self + .validators + .get_mut(&PublicKeyBytes::from(voting_public_key)) + { + val.gas_limit = None; + } + self.definitions .save(&self.validators_dir) .map_err(Error::UnableToSaveDefinitions)?; @@ -754,6 +1004,7 @@ impl InitializedValidators { def.clone(), &mut key_cache, &mut key_stores, + &mut None, ) .await { @@ -798,11 +1049,12 @@ impl InitializedValidators { } } } - SigningDefinition::Web3Signer { .. } => { + SigningDefinition::Web3Signer(Web3SignerDefinition { .. 
}) => { match InitializedValidator::from_definition( def.clone(), &mut key_cache, &mut key_stores, + &mut self.web3_signer_client_map, ) .await { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 43f88b54f0..9db4cc0315 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -5,7 +5,6 @@ mod check_synced; mod cli; mod config; mod duties_service; -mod fee_recipient_file; mod graffiti_file; mod http_metrics; mod key_cache; @@ -27,7 +26,8 @@ use monitoring_api::{MonitoringHttpClient, ProcessType}; pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use crate::beacon_node_fallback::{ - start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, RequireSynced, + start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, OfflineOnFailure, + RequireSynced, }; use crate::doppelganger_service::DoppelgangerService; use account_utils::validator_definitions::ValidatorDefinitions; @@ -73,7 +73,10 @@ const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -281,7 +284,13 @@ impl<T: EthSpec> ProductionValidatorClient<T> { liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, + sync_committee_contribution: slot_duration + / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT, sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT, + get_beacon_blocks_ssz: slot_duration + / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, 
+ get_debug_beacon_states: slot_duration + / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, } } else { Timeouts::set_all(slot_duration) @@ -306,8 +315,18 @@ impl<T: EthSpec> ProductionValidatorClient<T> { &http_metrics::metrics::ETH2_FALLBACK_CONFIGURED, num_nodes.saturating_sub(1) as i64, ); - // Initialize the number of connected, synced fallbacks to 0. + // Set the total beacon node count. + set_gauge( + &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT, + num_nodes as i64, + ); + + // Initialize the number of connected, synced beacon nodes to 0. set_gauge(&http_metrics::metrics::ETH2_FALLBACK_CONNECTED, 0); + set_gauge(&http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, 0); + // Initialize the number of connected, avaliable beacon nodes to 0. + set_gauge(&http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, 0); + let mut beacon_nodes: BeaconNodeFallback<_, T> = BeaconNodeFallback::new(candidates, context.eth2_config.spec.clone(), log.clone()); @@ -350,6 +369,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { context.eth2_config.spec.clone(), doppelganger_service.clone(), slot_clock.clone(), + &config, context.executor.clone(), log.clone(), )); @@ -400,7 +420,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> { .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) - .private_tx_proposals(config.private_tx_proposals) .build()?; let attestation_service = AttestationServiceBuilder::new() @@ -416,8 +435,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { .validator_store(validator_store.clone()) .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("preparation".into())) - .fee_recipient(config.fee_recipient) - .fee_recipient_file(config.fee_recipient_file.clone()) + .builder_registration_timestamp_override(config.builder_registration_timestamp_override) .build()?; let sync_committee_service = SyncCommitteeService::new( @@ -428,7 +446,7 @@ impl<T: EthSpec> 
ProductionValidatorClient<T> { context.service_context("sync_committee".into()), ); - // Wait until genesis has occured. + // Wait until genesis has occurred. // // It seems most sensible to move this into the `start_service` function, but I'm caution // of making too many changes this close to genesis (<1 week). @@ -558,9 +576,11 @@ async fn init_from_beacon_node<E: EthSpec>( let genesis = loop { match beacon_nodes - .first_success(RequireSynced::No, |node| async move { - node.get_beacon_genesis().await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |node| async move { node.get_beacon_genesis().await }, + ) .await { Ok(genesis) => break genesis.data, @@ -647,9 +667,11 @@ async fn poll_whilst_waiting_for_genesis<E: EthSpec>( ) -> Result<(), String> { loop { match beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - beacon_node.get_lighthouse_staking().await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { beacon_node.get_lighthouse_staking().await }, + ) .await { Ok(is_staking) => { diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index 6157027cb1..732ae68ff8 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -40,8 +40,20 @@ async fn notify<T: SlotClock + 'static, E: EthSpec>( log: &Logger, ) { let num_available = duties_service.beacon_nodes.num_available().await; + set_gauge( + &http_metrics::metrics::AVAILABLE_BEACON_NODES_COUNT, + num_available as i64, + ); let num_synced = duties_service.beacon_nodes.num_synced().await; + set_gauge( + &http_metrics::metrics::SYNCED_BEACON_NODES_COUNT, + num_synced as i64, + ); let num_total = duties_service.beacon_nodes.num_total(); + set_gauge( + &http_metrics::metrics::TOTAL_BEACON_NODES_COUNT, + num_total as i64, + ); if num_synced > 0 { info!( log, diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 
b4b6caa05d..d4178f2c48 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -1,27 +1,38 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::{ - fee_recipient_file::FeeRecipientFile, - validator_store::{DoppelgangerStatus, ValidatorStore}, -}; +use crate::validator_store::{DoppelgangerStatus, ValidatorStore}; +use crate::OfflineOnFailure; +use bls::PublicKeyBytes; use environment::RuntimeContext; -use slog::{debug, error, info}; +use parking_lot::RwLock; +use slog::{debug, error, info, warn}; use slot_clock::SlotClock; +use std::collections::HashMap; +use std::hash::Hash; use std::ops::Deref; use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{sleep, Duration}; -use types::{Address, ChainSpec, EthSpec, ProposerPreparationData}; +use types::{ + Address, ChainSpec, EthSpec, ProposerPreparationData, SignedValidatorRegistrationData, + ValidatorRegistrationData, +}; /// Number of epochs before the Bellatrix hard fork to begin posting proposer preparations. const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; +/// Number of epochs to wait before re-submitting validator registration. +const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1; + +/// The number of validator registrations to include per request to the beacon node. +const VALIDATOR_REGISTRATION_BATCH_SIZE: usize = 500; + /// Builds an `PreparationService`. 
pub struct PreparationServiceBuilder<T: SlotClock + 'static, E: EthSpec> { validator_store: Option<Arc<ValidatorStore<T, E>>>, slot_clock: Option<T>, beacon_nodes: Option<Arc<BeaconNodeFallback<T, E>>>, context: Option<RuntimeContext<E>>, - fee_recipient: Option<Address>, - fee_recipient_file: Option<FeeRecipientFile>, + builder_registration_timestamp_override: Option<u64>, } impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { @@ -31,8 +42,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { slot_clock: None, beacon_nodes: None, context: None, - fee_recipient: None, - fee_recipient_file: None, + builder_registration_timestamp_override: None, } } @@ -56,13 +66,11 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { self } - pub fn fee_recipient(mut self, fee_recipient: Option<Address>) -> Self { - self.fee_recipient = fee_recipient; - self - } - - pub fn fee_recipient_file(mut self, fee_recipient_file: Option<FeeRecipientFile>) -> Self { - self.fee_recipient_file = fee_recipient_file; + pub fn builder_registration_timestamp_override( + mut self, + builder_registration_timestamp_override: Option<u64>, + ) -> Self { + self.builder_registration_timestamp_override = builder_registration_timestamp_override; self } @@ -81,8 +89,9 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationServiceBuilder<T, E> { context: self .context .ok_or("Cannot build PreparationService without runtime_context")?, - fee_recipient: self.fee_recipient, - fee_recipient_file: self.fee_recipient_file, + builder_registration_timestamp_override: self + .builder_registration_timestamp_override, + validator_registration_cache: RwLock::new(HashMap::new()), }), }) } @@ -94,8 +103,33 @@ pub struct Inner<T, E: EthSpec> { slot_clock: T, beacon_nodes: Arc<BeaconNodeFallback<T, E>>, context: RuntimeContext<E>, - fee_recipient: Option<Address>, - fee_recipient_file: Option<FeeRecipientFile>, + builder_registration_timestamp_override: 
Option<u64>, + // Used to track unpublished validator registration changes. + validator_registration_cache: + RwLock<HashMap<ValidatorRegistrationKey, SignedValidatorRegistrationData>>, +} + +#[derive(Hash, Eq, PartialEq, Debug, Clone)] +pub struct ValidatorRegistrationKey { + pub fee_recipient: Address, + pub gas_limit: u64, + pub pubkey: PublicKeyBytes, +} + +impl From<ValidatorRegistrationData> for ValidatorRegistrationKey { + fn from(data: ValidatorRegistrationData) -> Self { + let ValidatorRegistrationData { + fee_recipient, + gas_limit, + timestamp: _, + pubkey, + } = data; + Self { + fee_recipient, + gas_limit, + pubkey, + } + } } /// Attempts to produce proposer preparations for all known validators at the beginning of each epoch. @@ -120,8 +154,13 @@ impl<T, E: EthSpec> Deref for PreparationService<T, E> { } impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { - /// Starts the service which periodically produces proposer preparations. pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> { + self.clone().start_validator_registration_service(spec)?; + self.start_proposer_prepare_service(spec) + } + + /// Starts the service which periodically produces proposer preparations. + pub fn start_proposer_prepare_service(self, spec: &ChainSpec) -> Result<(), String> { let log = self.context.log().clone(); let slot_duration = Duration::from_secs(spec.seconds_per_slot); @@ -163,6 +202,41 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { Ok(()) } + /// Starts the service which periodically sends connected beacon nodes validator registration information. 
+ pub fn start_validator_registration_service(self, spec: &ChainSpec) -> Result<(), String> { + let log = self.context.log().clone(); + + info!( + log, + "Validator registration service started"; + ); + + let spec = spec.clone(); + let slot_duration = Duration::from_secs(spec.seconds_per_slot); + + let executor = self.context.executor.clone(); + + let validator_registration_fut = async move { + loop { + // Poll the endpoint immediately to ensure fee recipients are received. + if let Err(e) = self.register_validators().await { + error!(log,"Error during validator registration";"error" => ?e); + } + + // Wait one slot if the register validator request fails or if we should not publish at the current slot. + if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { + sleep(duration_to_next_slot).await; + } else { + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + } + } + }; + executor.spawn(validator_registration_fut, "validator_registration_service"); + Ok(()) + } + /// Return `true` if the current slot is close to or past the Bellatrix fork epoch. /// /// This avoids spamming the BN with preparations before the Bellatrix fork epoch, which may @@ -189,24 +263,50 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { fn collect_preparation_data(&self, spec: &ChainSpec) -> Vec<ProposerPreparationData> { let log = self.context.log(); + self.collect_proposal_data(|pubkey, proposal_data| { + if let Some(fee_recipient) = proposal_data.fee_recipient { + Some(ProposerPreparationData { + // Ignore fee recipients for keys without indices, they are inactive. 
+ validator_index: proposal_data.validator_index?, + fee_recipient, + }) + } else { + if spec.bellatrix_fork_epoch.is_some() { + error!( + log, + "Validator is missing fee recipient"; + "msg" => "update validator_definitions.yml", + "pubkey" => ?pubkey + ); + } + None + } + }) + } - let fee_recipient_file = self - .fee_recipient_file - .clone() - .map(|mut fee_recipient_file| { - fee_recipient_file - .read_fee_recipient_file() - .map_err(|e| { - error!( - log, - "Error loading fee-recipient file"; - "error" => ?e - ); + fn collect_validator_registration_keys(&self) -> Vec<ValidatorRegistrationKey> { + self.collect_proposal_data(|pubkey, proposal_data| { + // Ignore fee recipients for keys without indices, they are inactive. + proposal_data.validator_index?; + + // We don't log for missing fee recipients here because this will be logged more + // frequently in `collect_preparation_data`. + proposal_data.fee_recipient.and_then(|fee_recipient| { + proposal_data + .builder_proposals + .then(|| ValidatorRegistrationKey { + fee_recipient, + gas_limit: proposal_data.gas_limit, + pubkey, }) - .unwrap_or(()); - fee_recipient_file - }); + }) + }) + } + fn collect_proposal_data<G, U>(&self, map_fn: G) -> Vec<U> + where + G: Fn(PublicKeyBytes, ProposalData) -> Option<U>, + { let all_pubkeys: Vec<_> = self .validator_store .voting_pubkeys(DoppelgangerStatus::ignored); @@ -214,41 +314,8 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { all_pubkeys .into_iter() .filter_map(|pubkey| { - // Ignore fee recipients for keys without indices, they are inactive. - let validator_index = self.validator_store.validator_index(&pubkey)?; - - // If there is a `suggested_fee_recipient` in the validator definitions yaml - // file, use that value. - let fee_recipient = self - .validator_store - .suggested_fee_recipient(&pubkey) - .or_else(|| { - // If there's nothing in the validator defs file, check the fee - // recipient file. - fee_recipient_file - .as_ref()? 
- .get_fee_recipient(&pubkey) - .ok()? - }) - // If there's nothing in the file, try the process-level default value. - .or(self.fee_recipient); - - if let Some(fee_recipient) = fee_recipient { - Some(ProposerPreparationData { - validator_index, - fee_recipient, - }) - } else { - if spec.bellatrix_fork_epoch.is_some() { - error!( - log, - "Validator is missing fee recipient"; - "msg" => "update validator_definitions.yml", - "pubkey" => ?pubkey - ); - } - None - } + let proposal_data = self.validator_store.proposal_data(&pubkey)?; + map_fn(pubkey, proposal_data) }) .collect() } @@ -264,11 +331,15 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { let preparation_entries = preparation_data.as_slice(); match self .beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { - beacon_node - .post_validator_prepare_beacon_proposer(preparation_entries) - .await - }) + .first_success( + RequireSynced::Yes, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_validator_prepare_beacon_proposer(preparation_entries) + .await + }, + ) .await { Ok(()) => debug!( @@ -284,4 +355,131 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> { } Ok(()) } + + /// Register validators with builders, used in the blinded block proposal flow. + async fn register_validators(&self) -> Result<(), String> { + let registration_keys = self.collect_validator_registration_keys(); + + let mut changed_keys = vec![]; + + // Need to scope this so the read lock is not held across an await point (I don't know why + // but the explicit `drop` is not enough). + { + let guard = self.validator_registration_cache.read(); + for key in registration_keys.iter() { + if !guard.contains_key(key) { + changed_keys.push(key.clone()); + } + } + drop(guard); + } + + // Check if any have changed or it's been `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION`. 
+ if let Some(slot) = self.slot_clock.now() { + if slot % (E::slots_per_epoch() * EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION) == 0 { + self.publish_validator_registration_data(registration_keys) + .await?; + } else if !changed_keys.is_empty() { + self.publish_validator_registration_data(changed_keys) + .await?; + } + } + + Ok(()) + } + + async fn publish_validator_registration_data( + &self, + registration_keys: Vec<ValidatorRegistrationKey>, + ) -> Result<(), String> { + let log = self.context.log(); + + let registration_data_len = registration_keys.len(); + let mut signed = Vec::with_capacity(registration_data_len); + + for key in registration_keys { + let cached_registration_opt = + self.validator_registration_cache.read().get(&key).cloned(); + + let signed_data = if let Some(signed_data) = cached_registration_opt { + signed_data + } else { + let timestamp = + if let Some(timestamp) = self.builder_registration_timestamp_override { + timestamp + } else { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("{e:?}"))? 
+ .as_secs() + }; + + let ValidatorRegistrationKey { + fee_recipient, + gas_limit, + pubkey, + } = key.clone(); + + let signed_data = match self + .validator_store + .sign_validator_registration_data(ValidatorRegistrationData { + fee_recipient, + gas_limit, + timestamp, + pubkey, + }) + .await + { + Ok(data) => data, + Err(e) => { + error!(log, "Unable to sign validator registration data"; "error" => ?e, "pubkey" => ?pubkey); + continue; + } + }; + + self.validator_registration_cache + .write() + .insert(key, signed_data.clone()); + + signed_data + }; + signed.push(signed_data); + } + + if !signed.is_empty() { + for batch in signed.chunks(VALIDATOR_REGISTRATION_BATCH_SIZE) { + match self + .beacon_nodes + .first_success( + RequireSynced::Yes, + OfflineOnFailure::No, + |beacon_node| async move { + beacon_node.post_validator_register_validator(batch).await + }, + ) + .await + { + Ok(()) => info!( + log, + "Published validator registrations to the builder network"; + "count" => registration_data_len, + ), + Err(e) => warn!( + log, + "Unable to publish validator registrations to the builder network"; + "error" => %e, + ), + } + } + } + Ok(()) + } +} + +/// A helper struct, used for passing data from the validator store to services. +pub struct ProposalData { + pub(crate) validator_index: Option<u64>, + pub(crate) fee_recipient: Option<Address>, + pub(crate) gas_limit: u64, + pub(crate) builder_proposals: bool, } diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 0daefc43c4..de69d99003 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -30,6 +30,7 @@ pub enum Error { ShuttingDown, TokioJoin(String), MergeForkNotSupported, + GenesisForkVersionRequired, } /// Enumerates all messages that can be signed by a validator. 
@@ -45,6 +46,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: ExecPayload<T> = FullPayload<T slot: Slot, }, SignedContributionAndProof(&'a ContributionAndProof<T>), + ValidatorRegistration(&'a ValidatorRegistrationData), } impl<'a, T: EthSpec, Payload: ExecPayload<T>> SignableMessage<'a, T, Payload> { @@ -64,6 +66,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload<T>> SignableMessage<'a, T, Payload> { beacon_block_root, .. } => beacon_block_root.signing_root(domain), SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain), + SignableMessage::ValidatorRegistration(v) => v.signing_root(domain), } } } @@ -129,6 +132,22 @@ impl SigningMethod { let signing_root = signable_message.signing_root(domain_hash); + let fork_info = Some(ForkInfo { + fork, + genesis_validators_root, + }); + + self.get_signature_from_root(signable_message, signing_root, executor, fork_info) + .await + } + + pub async fn get_signature_from_root<T: EthSpec, Payload: ExecPayload<T>>( + &self, + signable_message: SignableMessage<'_, T, Payload>, + signing_root: Hash256, + executor: &TaskExecutor, + fork_info: Option<ForkInfo>, + ) -> Result<Signature, Error> { match self { SigningMethod::LocalKeystore { voting_keypair, .. } => { let _timer = @@ -181,21 +200,21 @@ impl SigningMethod { SignableMessage::SignedContributionAndProof(c) => { Web3SignerObject::ContributionAndProof(c) } + SignableMessage::ValidatorRegistration(v) => { + Web3SignerObject::ValidatorRegistration(v) + } }; // Determine the Web3Signer message type. let message_type = object.message_type(); - // The `fork_info` field is not required for deposits since they sign across the - // genesis fork version. - let fork_info = if let Web3SignerObject::Deposit { .. } = &object { - None - } else { - Some(ForkInfo { - fork, - genesis_validators_root, - }) - }; + if matches!( + object, + Web3SignerObject::Deposit { .. 
} | Web3SignerObject::ValidatorRegistration(_) + ) && fork_info.is_some() + { + return Err(Error::GenesisForkVersionRequired); + } let request = SigningRequest { message_type, diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs index 9ac1655cce..cf02ae0c32 100644 --- a/validator_client/src/signing_method/web3signer.rs +++ b/validator_client/src/signing_method/web3signer.rs @@ -17,6 +17,7 @@ pub enum MessageType { SyncCommitteeMessage, SyncCommitteeSelectionProof, SyncCommitteeContributionAndProof, + ValidatorRegistration, } #[derive(Debug, PartialEq, Copy, Clone, Serialize)] @@ -24,6 +25,7 @@ pub enum MessageType { pub enum ForkName { Phase0, Altair, + Bellatrix, } #[derive(Debug, PartialEq, Serialize)] @@ -42,7 +44,10 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload<T>> { Attestation(&'a AttestationData), BeaconBlock { version: ForkName, - block: &'a BeaconBlock<T, Payload>, + #[serde(skip_serializing_if = "Option::is_none")] + block: Option<&'a BeaconBlock<T, Payload>>, + #[serde(skip_serializing_if = "Option::is_none")] + block_header: Option<BeaconBlockHeader>, }, #[allow(dead_code)] Deposit { @@ -64,17 +69,28 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: ExecPayload<T>> { }, SyncAggregatorSelectionData(&'a SyncAggregatorSelectionData), ContributionAndProof(&'a ContributionAndProof<T>), + ValidatorRegistration(&'a ValidatorRegistrationData), } impl<'a, T: EthSpec, Payload: ExecPayload<T>> Web3SignerObject<'a, T, Payload> { pub fn beacon_block(block: &'a BeaconBlock<T, Payload>) -> Result<Self, Error> { - let version = match block { - BeaconBlock::Base(_) => ForkName::Phase0, - BeaconBlock::Altair(_) => ForkName::Altair, - BeaconBlock::Merge(_) => return Err(Error::MergeForkNotSupported), - }; - - Ok(Web3SignerObject::BeaconBlock { version, block }) + match block { + BeaconBlock::Base(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Phase0, + block: Some(block), 
+ block_header: None, + }), + BeaconBlock::Altair(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Altair, + block: Some(block), + block_header: None, + }), + BeaconBlock::Merge(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Bellatrix, + block: None, + block_header: Some(block.block_header()), + }), + } } pub fn message_type(&self) -> MessageType { @@ -93,6 +109,7 @@ impl<'a, T: EthSpec, Payload: ExecPayload<T>> Web3SignerObject<'a, T, Payload> { Web3SignerObject::ContributionAndProof(_) => { MessageType::SyncCommitteeContributionAndProof } + Web3SignerObject::ValidatorRegistration(_) => MessageType::ValidatorRegistration, } } } diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs index 105bf7d27f..1e6ff7a5b5 100644 --- a/validator_client/src/sync_committee_service.rs +++ b/validator_client/src/sync_committee_service.rs @@ -1,10 +1,10 @@ use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; -use crate::{duties_service::DutiesService, validator_store::ValidatorStore}; +use crate::{duties_service::DutiesService, validator_store::ValidatorStore, OfflineOnFailure}; use environment::RuntimeContext; use eth2::types::BlockId; use futures::future::join_all; use futures::future::FutureExt; -use slog::{crit, debug, error, info, trace}; +use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; @@ -174,17 +174,39 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { return Ok(()); } - // Fetch block root for `SyncCommitteeContribution`. - let block_root = self + // Fetch `block_root` and `execution_optimistic` for `SyncCommitteeContribution`. 
+ let response = self .beacon_nodes - .first_success(RequireSynced::Yes, |beacon_node| async move { + .first_success(RequireSynced::Yes, OfflineOnFailure::Yes,|beacon_node| async move { beacon_node.get_beacon_blocks_root(BlockId::Head).await }) .await .map_err(|e| e.to_string())? - .ok_or_else(|| format!("No block root found for slot {}", slot))? - .data - .root; + .ok_or_else(|| format!("No block root found for slot {}", slot))?; + + let block_root = response.data.root; + if let Some(execution_optimistic) = response.execution_optimistic { + if execution_optimistic { + warn!( + log, + "Refusing to sign sync committee messages for optimistic head block"; + "slot" => slot, + ); + return Ok(()); + } + } else if let Some(bellatrix_fork_epoch) = self.duties_service.spec.bellatrix_fork_epoch { + // If the slot is post Bellatrix, do not sign messages when we cannot verify the + // optimistic status of the head block. + if slot.epoch(E::slots_per_epoch()) > bellatrix_fork_epoch { + warn!( + log, + "Refusing to sign sync committee messages for a head block with an unknown \ + optimistic status"; + "slot" => slot, + ); + return Ok(()); + } + } // Spawn one task to publish all of the sync committee signatures. 
let validator_duties = slot_duties.duties; @@ -262,11 +284,15 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { .collect::<Vec<_>>(); self.beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - beacon_node - .post_beacon_pool_sync_committee_signatures(committee_signatures) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_beacon_pool_sync_committee_signatures(committee_signatures) + .await + }, + ) .await .map_err(|e| { error!( @@ -329,17 +355,21 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { let contribution = &self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - let sync_contribution_data = SyncContributionData { - slot, - beacon_block_root, - subcommittee_index: subnet_id.into(), - }; + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let sync_contribution_data = SyncContributionData { + slot, + beacon_block_root, + subcommittee_index: subnet_id.into(), + }; - beacon_node - .get_validator_sync_committee_contribution::<E>(&sync_contribution_data) - .await - }) + beacon_node + .get_validator_sync_committee_contribution::<E>(&sync_contribution_data) + .await + }, + ) .await .map_err(|e| { crit!( @@ -396,11 +426,15 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { // Publish to the beacon node. 
self.beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - beacon_node - .post_validator_contribution_and_proofs(signed_contributions) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_validator_contribution_and_proofs(signed_contributions) + .await + }, + ) .await .map_err(|e| { error!( @@ -534,11 +568,15 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> { if let Err(e) = self .beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { - beacon_node - .post_validator_sync_committee_subscriptions(subscriptions_slice) - .await - }) + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + beacon_node + .post_validator_sync_committee_subscriptions(subscriptions_slice) + .await + }, + ) .await { error!( diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index b39ef9ef83..292b49ac3a 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -3,6 +3,7 @@ use crate::{ http_metrics::metrics, initialized_validators::InitializedValidators, signing_method::{Error as SigningError, SignableMessage, SigningContext, SigningMethod}, + Config, }; use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString}; use parking_lot::{Mutex, RwLock}; @@ -20,13 +21,14 @@ use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, ExecPayload, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, - Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, Slot, - SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, - SyncSelectionProof, SyncSubnetId, + Signature, SignedAggregateAndProof, SignedBeaconBlock, 
SignedContributionAndProof, SignedRoot, + SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, + SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; use validator_dir::ValidatorDir; pub use crate::doppelganger_service::DoppelgangerStatus; +use crate::preparation_service::ProposalData; #[derive(Debug, PartialEq)] pub enum Error { @@ -52,6 +54,11 @@ impl From<SigningError> for Error { /// This acts as a maximum safe-guard against clock drift. const SLASHING_PROTECTION_HISTORY_EPOCHS: u64 = 512; +/// Currently used as the default gas limit in execution clients. +/// +/// https://github.com/ethereum/builder-specs/issues/17 +pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; + struct LocalValidator { validator_dir: ValidatorDir, voting_keypair: Keypair, @@ -86,6 +93,9 @@ pub struct ValidatorStore<T, E: EthSpec> { log: Logger, doppelganger_service: Option<Arc<DoppelgangerService>>, slot_clock: T, + fee_recipient_process: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: bool, task_executor: TaskExecutor, _phantom: PhantomData<E>, } @@ -101,6 +111,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { spec: ChainSpec, doppelganger_service: Option<Arc<DoppelgangerService>>, slot_clock: T, + config: &Config, task_executor: TaskExecutor, log: Logger, ) -> Self { @@ -113,6 +124,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { log, doppelganger_service, slot_clock, + fee_recipient_process: config.fee_recipient, + gas_limit: config.gas_limit, + builder_proposals: config.builder_proposals, task_executor, _phantom: PhantomData, } @@ -143,6 +157,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { /// Insert a new validator to `self`, where the validator is represented by an EIP-2335 /// keystore on the filesystem. 
+ #[allow(clippy::too_many_arguments)] pub async fn add_validator_keystore<P: AsRef<Path>>( &self, voting_keystore_path: P, @@ -150,12 +165,16 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { enable: bool, graffiti: Option<GraffitiString>, suggested_fee_recipient: Option<Address>, + gas_limit: Option<u64>, + builder_proposals: Option<bool>, ) -> Result<ValidatorDefinition, String> { let mut validator_def = ValidatorDefinition::new_keystore_with_password( voting_keystore_path, Some(password), graffiti.map(Into::into), suggested_fee_recipient, + gas_limit, + builder_proposals, ) .map_err(|e| format!("failed to create validator definitions: {:?}", e))?; @@ -197,6 +216,23 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { Ok(validator_def) } + /// Returns `ProposalData` for the provided `pubkey` if it exists in `InitializedValidators`. + /// `ProposalData` fields include defaulting logic described in `get_fee_recipient_defaulting`, + /// `get_gas_limit_defaulting`, and `get_builder_proposals_defaulting`. + pub fn proposal_data(&self, pubkey: &PublicKeyBytes) -> Option<ProposalData> { + self.validators + .read() + .validator(pubkey) + .map(|validator| ProposalData { + validator_index: validator.get_index(), + fee_recipient: self + .get_fee_recipient_defaulting(validator.get_suggested_fee_recipient()), + gas_limit: self.get_gas_limit_defaulting(validator.get_gas_limit()), + builder_proposals: self + .get_builder_proposals_defaulting(validator.get_builder_proposals()), + }) + } + /// Attempts to resolve the pubkey to a validator index. /// /// It may return `None` if the `pubkey` is: @@ -356,12 +392,68 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { self.validators.read().graffiti(validator_pubkey) } - pub fn suggested_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option<Address> { + /// Returns the fee recipient for the given public key. The priority order for fetching + /// the fee recipient is: + /// 1. 
validator_definitions.yml + /// 2. process level fee recipient + pub fn get_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option<Address> { + // If there is a `suggested_fee_recipient` in the validator definitions yaml + // file, use that value. + self.get_fee_recipient_defaulting(self.suggested_fee_recipient(validator_pubkey)) + } + + pub fn get_fee_recipient_defaulting(&self, fee_recipient: Option<Address>) -> Option<Address> { + // If there's nothing in the file, try the process-level default value. + fee_recipient.or(self.fee_recipient_process) + } + + /// Returns the suggested_fee_recipient from `validator_definitions.yml` if any. + /// This has been pulled into a private function so the read lock is dropped easily + fn suggested_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option<Address> { self.validators .read() .suggested_fee_recipient(validator_pubkey) } + /// Returns the gas limit for the given public key. The priority order for fetching + /// the gas limit is: + /// + /// 1. validator_definitions.yml + /// 2. process level gas limit + /// 3. `DEFAULT_GAS_LIMIT` + pub fn get_gas_limit(&self, validator_pubkey: &PublicKeyBytes) -> u64 { + self.get_gas_limit_defaulting(self.validators.read().gas_limit(validator_pubkey)) + } + + fn get_gas_limit_defaulting(&self, gas_limit: Option<u64>) -> u64 { + // If there is a `gas_limit` in the validator definitions yaml + // file, use that value. + gas_limit + // If there's nothing in the file, try the process-level default value. + .or(self.gas_limit) + // If there's no process-level default, use the `DEFAULT_GAS_LIMIT`. + .unwrap_or(DEFAULT_GAS_LIMIT) + } + + /// Returns a `bool` for the given public key that denotes whther this validator should use the + /// builder API. The priority order for fetching this value is: + /// + /// 1. validator_definitions.yml + /// 2. 
process level flag + pub fn get_builder_proposals(&self, validator_pubkey: &PublicKeyBytes) -> bool { + // If there is a `suggested_fee_recipient` in the validator definitions yaml + // file, use that value. + self.get_builder_proposals_defaulting( + self.validators.read().builder_proposals(validator_pubkey), + ) + } + + fn get_builder_proposals_defaulting(&self, builder_proposals: Option<bool>) -> bool { + builder_proposals + // If there's nothing in the file, try the process-level default value. + .unwrap_or(self.builder_proposals) + } + pub async fn sign_block<Payload: ExecPayload<E>>( &self, validator_pubkey: PublicKeyBytes, @@ -524,6 +616,35 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { } } + pub async fn sign_validator_registration_data( + &self, + validator_registration_data: ValidatorRegistrationData, + ) -> Result<SignedValidatorRegistrationData, Error> { + let domain_hash = self.spec.get_builder_domain(); + let signing_root = validator_registration_data.signing_root(domain_hash); + + let signing_method = + self.doppelganger_bypassed_signing_method(validator_registration_data.pubkey)?; + let signature = signing_method + .get_signature_from_root::<E, BlindedPayload<E>>( + SignableMessage::ValidatorRegistration(&validator_registration_data), + signing_root, + &self.task_executor, + None, + ) + .await?; + + metrics::inc_counter_vec( + &metrics::SIGNED_VALIDATOR_REGISTRATIONS_TOTAL, + &[metrics::SUCCESS], + ); + + Ok(SignedValidatorRegistrationData { + message: validator_registration_data, + signature, + }) + } + /// Signs an `AggregateAndProof` for a given validator. /// /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be