diff --git a/.github/mergify.yml b/.github/mergify.yml new file mode 100644 index 0000000000..ae01e3ffd2 --- /dev/null +++ b/.github/mergify.yml @@ -0,0 +1,13 @@ +queue_rules: + - name: default + batch_size: 8 + batch_max_wait_time: 60 s + checks_timeout: 10800 s + merge_method: squash + queue_conditions: + - "#approved-reviews-by >= 1" + - "check-success=license/cla" + - "check-success=target-branch-check" + merge_conditions: + - "check-success=test-suite-success" + - "check-success=local-testnet-success" diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 74a9071eab..75a81ce0e7 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -86,4 +86,15 @@ jobs: - name: Stop local testnet with blinded block production run: ./stop_local_testnet.sh - working-directory: scripts/local_testnet \ No newline at end of file + working-directory: scripts/local_testnet + + # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether + # a PR is safe to merge. New jobs should be added here. + local-testnet-success: + name: local-testnet-success + runs-on: ubuntu-latest + needs: ["run-local-testnet"] + steps: + - uses: actions/checkout@v3 + - name: Check that success job is dependent on all others + run: ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index b3edb30125..87dc746ff9 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -439,3 +439,36 @@ jobs: cache-target: release - name: Run Makefile to trigger the bash script run: make cli + # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether + # a PR is safe to merge. New jobs should be added here. 
+ test-suite-success: + name: test-suite-success + runs-on: ubuntu-latest + needs: [ + 'target-branch-check', + 'release-tests-ubuntu', + 'release-tests-windows', + 'beacon-chain-tests', + 'op-pool-tests', + 'network-tests', + 'slasher-tests', + 'debug-tests-ubuntu', + 'state-transition-vectors-ubuntu', + 'ef-tests-ubuntu', + 'dockerfile-ubuntu', + 'eth1-simulator-ubuntu', + 'merge-transition-ubuntu', + 'no-eth1-simulator-ubuntu', + 'syncing-simulator-ubuntu', + 'doppelganger-protection-test', + 'execution-engine-integration-ubuntu', + 'check-code', + 'check-msrv', + 'cargo-udeps', + 'compile-with-beta-compiler', + 'cli-check', + ] + steps: + - uses: actions/checkout@v3 + - name: Check that success job is dependent on all others + run: ./scripts/ci/check-success-job.sh ./.github/workflows/test-suite.yml test-suite-success diff --git a/Cargo.lock b/Cargo.lock index f91068efe5..b111a18749 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -130,9 +130,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" dependencies = [ "getrandom", "once_cell", @@ -141,13 +141,14 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ -355,7 +356,7 @@ version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" dependencies = [ - "http", + "http 0.2.9", "log", "url", "wildmatch", @@ -367,7 +368,7 @@ 
version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" dependencies = [ - "http", + "http 0.2.9", "log", "url", ] @@ -403,18 +404,19 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.20" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "202651474fe73c62d9e0a56c6133f7a0ff1dc1c8cf7a5b03381af2a26553ac9d" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.0.1", + "hyper-util", "itoa", "matchit", "memchr", @@ -435,17 +437,20 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "77cb22c689c44d4c07b0ab44ebc25d69d8ae601a2f28fb8d672d344178fa17aa" dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper", "tower-layer", "tower-service", ] @@ -528,7 +533,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "logging", - "lru 0.7.8", + "lru 0.12.0", "maplit", "merkle_proof", "oneshot_broadcast", @@ -580,7 +585,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper", + "hyper 1.0.1", "lighthouse_network", "lighthouse_version", "monitoring_api", @@ -2475,7 +2480,7 @@ dependencies = [ "getrandom", "hashers", "hex", - "http", + "http 0.2.9", "once_cell", "parking_lot 0.11.2", "pin-project", @@ -2549,7 +2554,7 @@ dependencies = [ "kzg", "lazy_static", "lighthouse_metrics", - "lru 0.7.8", + "lru 
0.12.0", "parking_lot 0.12.1", "pretty_reqwest_error", "rand", @@ -3053,7 +3058,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.9", "indexmap 1.9.3", "slab", "tokio", @@ -3061,6 +3066,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "h2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d308f63daf4181410c242d34c11f928dcb3aa105852019e043c9d1f4e4368a" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 1.0.0", + "indexmap 2.0.2", + "slab", + "tokio", + "tokio-util 0.7.9", + "tracing", +] + [[package]] name = "half" version = "1.8.2" @@ -3088,7 +3112,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash 0.7.6", + "ahash 0.7.7", ] [[package]] @@ -3097,16 +3121,16 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash 0.7.6", + "ahash 0.7.7", ] [[package]] name = "hashbrown" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.3", + "ahash 0.8.6", "allocator-api2", ] @@ -3134,7 +3158,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.1", + "hashbrown 0.14.3", ] [[package]] @@ -3146,7 +3170,7 @@ dependencies = [ "base64 0.21.5", "bytes", "headers-core", - "http", + "http 0.2.9", "httpdate", "mime", "sha1", @@ -3158,7 +3182,7 @@ version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http", + "http 0.2.9", ] [[package]] @@ -3320,6 +3344,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.5" @@ -3327,7 +3362,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http", + "http 0.2.9", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.0.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", "pin-project-lite", ] @@ -3354,7 +3412,7 @@ dependencies = [ "lighthouse_network", "lighthouse_version", "logging", - "lru 0.7.8", + "lru 0.12.0", "network", "operation_pool", "parking_lot 0.12.1", @@ -3427,9 +3485,9 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.21", + "http 0.2.9", + "http-body 0.4.5", "httparse", "httpdate", "itoa", @@ -3441,6 +3499,25 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "403f9214f3e703236b221f1a9cd88ec8b4adfa5296de01ab96216361f4692f56" +dependencies = [ + "bytes", + 
"futures-channel", + "futures-util", + "h2 0.4.0", + "http 1.0.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "tokio", +] + [[package]] name = "hyper-rustls" version = "0.24.1" @@ -3448,8 +3525,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ "futures-util", - "http", - "hyper", + "http 0.2.9", + "hyper 0.14.27", "rustls", "tokio", "tokio-rustls", @@ -3462,12 +3539,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper", + "hyper 0.14.27", "native-tls", "tokio", "tokio-native-tls", ] +[[package]] +name = "hyper-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca339002caeb0d159cc6e023dff48e199f081e42fa039895c7c6f38b37f2e9d" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "hyper 1.0.1", + "pin-project-lite", + "socket2 0.5.5", + "tokio", + "tower", + "tower-service", + "tracing", +] + [[package]] name = "iana-time-zone" version = "0.1.58" @@ -3580,8 +3677,8 @@ dependencies = [ "attohttpc 0.24.1", "bytes", "futures", - "http", - "hyper", + "http 0.2.9", + "hyper 0.14.27", "log", "rand", "tokio", @@ -3668,7 +3765,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.1", + "hashbrown 0.14.3", ] [[package]] @@ -4586,7 +4683,7 @@ dependencies = [ "libp2p-mplex", "lighthouse_metrics", "lighthouse_version", - "lru 0.7.8", + "lru 0.12.0", "lru_cache", "parking_lot 0.12.1", "prometheus-client", @@ -4729,7 +4826,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1efa59af2ddfad1854ae27d75009d538d0998b4b2fd47083e743ac1a10e46c60" dependencies = [ - "hashbrown 0.14.1", + "hashbrown 0.14.3", ] [[package]] @@ -5159,7 +5256,7 @@ dependencies = [ "lighthouse_metrics", "lighthouse_network", "logging", - "lru 0.7.8", + "lru 0.12.0", "lru_cache", "matches", "num_cpus", @@ -6407,10 +6504,10 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.21", + "http 0.2.9", + "http-body 0.4.5", + "hyper 0.14.27", "hyper-rustls", "hyper-tls", "ipnet", @@ -6987,14 +7084,15 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.26" +version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 1.9.3", + "indexmap 2.0.2", + "itoa", "ryu", "serde", - "yaml-rust", + "unsafe-libyaml", ] [[package]] @@ -7159,7 +7257,7 @@ dependencies = [ "lmdb-rkv", "lmdb-rkv-sys", "logging", - "lru 0.7.8", + "lru 0.12.0", "maplit", "parking_lot 0.12.1", "rand", @@ -7479,7 +7577,7 @@ dependencies = [ "lazy_static", "leveldb", "lighthouse_metrics", - "lru 0.7.8", + "lru 0.12.0", "parking_lot 0.12.1", "serde", "slog", @@ -7741,8 +7839,9 @@ dependencies = [ [[package]] name = "testcontainers" -version = "0.14.0" -source = "git+https://github.com/testcontainers/testcontainers-rs/?rev=0f2c9851#0f2c985160e51a200cfc847097c15b8d85ed7df1" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d2931d7f521af5bae989f716c3fa43a6af9af7ec7a5e21b59ae40878cec00" dependencies = [ "bollard-stubs", "futures", @@ -8355,6 +8454,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "unsafe-libyaml" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa" 
+ [[package]] name = "unsigned-varint" version = "0.6.0" @@ -8437,7 +8542,7 @@ dependencies = [ "filesystem", "futures", "hex", - "hyper", + "hyper 1.0.1", "itertools", "lazy_static", "libsecp256k1", @@ -8580,8 +8685,8 @@ dependencies = [ "futures-channel", "futures-util", "headers", - "http", - "hyper", + "http 0.2.9", + "hyper 0.14.27", "log", "mime", "mime_guess", @@ -8734,7 +8839,7 @@ dependencies = [ "eth2", "hex", "http_api", - "hyper", + "hyper 1.0.1", "log", "logging", "network", @@ -9159,6 +9264,26 @@ dependencies = [ "time", ] +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.38", +] + [[package]] name = "zeroize" version = "1.6.0" diff --git a/Cargo.toml b/Cargo.toml index 9adb913ff5..6847d650ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,12 +120,12 @@ fnv = "1" fs2 = "0.4" futures = "0.3" hex = "0.4" -hyper = "0.14" +hyper = "1" itertools = "0.10" lazy_static = "1" libsecp256k1 = "0.7" log = "0.4" -lru = "0.7" +lru = "0.12" maplit = "1" num_cpus = "1" parking_lot = "0.12" @@ -143,7 +143,7 @@ rusqlite = { version = "0.28", features = ["bundled"] } serde = { version = "1", features = ["derive"] } serde_json = "1" serde_repr = "0.1" -serde_yaml = "0.8" +serde_yaml = "0.9" sha2 = "0.9" slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] } slog-async = "2" @@ -220,7 +220,7 @@ swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } task_executor = { path = "common/task_executor" } types = { path = "consensus/types" } unused_port = { path = 
"common/unused_port" } -validator_client = { path = "validator_client/" } +validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } warp_utils = { path = "common/warp_utils" } diff --git a/beacon_node/beacon_chain/src/attestation_simulator.rs b/beacon_node/beacon_chain/src/attestation_simulator.rs new file mode 100644 index 0000000000..b0b25aef18 --- /dev/null +++ b/beacon_node/beacon_chain/src/attestation_simulator.rs @@ -0,0 +1,93 @@ +use crate::{BeaconChain, BeaconChainTypes}; +use slog::{debug, error}; +use slot_clock::SlotClock; +use std::sync::Arc; +use task_executor::TaskExecutor; +use tokio::time::sleep; +use types::Slot; + +/// Spawns a routine which produces an unaggregated attestation at every slot. +/// +/// This routine will run once per slot +pub fn start_attestation_simulator_service( + executor: TaskExecutor, + chain: Arc>, +) { + executor.clone().spawn( + async move { attestation_simulator_service(executor, chain).await }, + "attestation_simulator_service", + ); +} + +/// Loop indefinitely, calling `BeaconChain::produce_unaggregated_attestation` every 4s into each slot. +async fn attestation_simulator_service( + executor: TaskExecutor, + chain: Arc>, +) { + let slot_duration = chain.slot_clock.slot_duration(); + let additional_delay = slot_duration / 3; + + loop { + match chain.slot_clock.duration_to_next_slot() { + Some(duration) => { + sleep(duration + additional_delay).await; + + debug!( + chain.log, + "Simulating unagg. attestation production"; + ); + + // Run the task in the executor + let inner_chain = chain.clone(); + executor.spawn( + async move { + if let Ok(current_slot) = inner_chain.slot() { + produce_unaggregated_attestation(inner_chain, current_slot); + } + }, + "attestation_simulator_service", + ); + } + None => { + error!(chain.log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. 
+ sleep(slot_duration).await; + } + }; + } +} + +pub fn produce_unaggregated_attestation( + inner_chain: Arc>, + current_slot: Slot, +) { + // Since attestations for different committees are practically identical (apart from the committee index field) + // Committee 0 is guaranteed to exist. That means there's no need to load the committee. + let beacon_committee_index = 0; + + // Store the unaggregated attestation in the validator monitor for later processing + match inner_chain.produce_unaggregated_attestation(current_slot, beacon_committee_index) { + Ok(unaggregated_attestation) => { + let data = &unaggregated_attestation.data; + + debug!( + inner_chain.log, + "Produce unagg. attestation"; + "attestation_source" => data.source.root.to_string(), + "attestation_target" => data.target.root.to_string(), + ); + + inner_chain + .validator_monitor + .write() + .set_unaggregated_attestation(unaggregated_attestation); + } + Err(e) => { + debug!( + inner_chain.log, + "Failed to simulate attestation"; + "error" => ?e + ); + } + } +} diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 71270c197d..7d1e058a1b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -406,7 +406,7 @@ pub struct BeaconChain { /// Maintains a record of which validators have proposed blocks for each slot. pub observed_block_producers: RwLock>, /// Maintains a record of blob sidecars seen over the gossip network. - pub(crate) observed_blob_sidecars: RwLock>, + pub observed_blob_sidecars: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. pub(crate) observed_voluntary_exits: Mutex>, /// Maintains a record of which validators we've seen proposer slashings for. 
@@ -501,14 +501,14 @@ impl BeaconBlockResponseWrapper { }) } - pub fn execution_payload_value(&self) -> Option { + pub fn execution_payload_value(&self) -> Uint256 { match self { BeaconBlockResponseWrapper::Full(resp) => resp.execution_payload_value, BeaconBlockResponseWrapper::Blinded(resp) => resp.execution_payload_value, } } - pub fn consensus_block_value(&self) -> Option { + pub fn consensus_block_value(&self) -> u64 { match self { BeaconBlockResponseWrapper::Full(resp) => resp.consensus_block_value, BeaconBlockResponseWrapper::Blinded(resp) => resp.consensus_block_value, @@ -529,9 +529,9 @@ pub struct BeaconBlockResponse> { /// The Blobs / Proofs associated with the new block pub blob_items: Option<(KzgProofs, BlobsList)>, /// The execution layer reward for the block - pub execution_payload_value: Option, + pub execution_payload_value: Uint256, /// The consensus layer reward to the proposer - pub consensus_block_value: Option, + pub consensus_block_value: u64, } impl FinalizationAndCanonicity { @@ -3621,9 +3621,11 @@ impl BeaconChain { } // Allow the validator monitor to learn about a new valid state. 
- self.validator_monitor - .write() - .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), state); + self.validator_monitor.write().process_valid_state( + current_slot.epoch(T::EthSpec::slots_per_epoch()), + state, + &self.spec, + ); let validator_monitor = self.validator_monitor.read(); @@ -5165,8 +5167,11 @@ impl BeaconChain { .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, bls_to_execution_changes: bls_to_execution_changes.into(), - blob_kzg_commitments: kzg_commitments - .ok_or(BlockProductionError::InvalidPayloadFork)?, + blob_kzg_commitments: kzg_commitments.ok_or( + BlockProductionError::MissingKzgCommitment( + "Kzg commitments missing from block contents".to_string(), + ), + )?, }, }), maybe_blobs_and_proofs, @@ -5281,8 +5286,8 @@ impl BeaconChain { block, state, blob_items, - execution_payload_value: Some(execution_payload_value), - consensus_block_value: Some(consensus_block_value), + execution_payload_value, + consensus_block_value, }) } @@ -5558,6 +5563,7 @@ impl BeaconChain { parent_block_hash: forkchoice_update_params.head_hash.unwrap_or_default(), payload_attributes: payload_attributes.into(), }, + metadata: Default::default(), version: Some(self.spec.fork_name_at_slot::(prepare_slot)), })); } diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index b51592cadd..fa6c93a3ee 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -14,13 +14,15 @@ use lru::LruCache; use smallvec::SmallVec; use state_processing::state_advance::partial_state_advance; use std::cmp::Ordering; +use std::num::NonZeroUsize; +use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned, }; /// The number of sets of proposer indices that should be cached. 
-const CACHE_SIZE: usize = 16; +const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); /// This value is fairly unimportant, it's used to avoid heap allocations. The result of it being /// incorrect is non-substantial from a consensus perspective (and probably also from a diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index cc087e74a0..a4750fd6ef 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -420,7 +420,7 @@ pub fn validate_blob_sidecar_for_gossip( if chain .observed_blob_sidecars .read() - .is_known(&blob_sidecar) + .proposer_is_known(&blob_sidecar) .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? { return Err(GossipBlobError::RepeatBlob { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index e86ca85bbf..23c9ab7257 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -780,7 +780,10 @@ impl GossipVerifiedBlock { // it to the slasher if an error occurs, because that's the end of this block's journey, // and it could be a repeat proposal (a likely cause for slashing!). let header = block.signed_block_header(); - Self::new_without_slasher_checks(block, chain).map_err(|e| { + // The `SignedBeaconBlock` and `SignedBeaconBlockHeader` have the same canonical root, + // but it's way quicker to calculate root of the header since the hash of the tree rooted + // at `BeaconBlockBody` is already computed in the header. + Self::new_without_slasher_checks(block, &header, chain).map_err(|e| { process_block_slash_info::<_, BlockError>( chain, BlockSlashInfo::from_early_error_block(header, e), @@ -791,6 +794,7 @@ impl GossipVerifiedBlock { /// As for new, but doesn't pass the block to the slasher. 
fn new_without_slasher_checks( block: Arc>, + block_header: &SignedBeaconBlockHeader, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. @@ -810,7 +814,7 @@ impl GossipVerifiedBlock { }); } - let block_root = get_block_root(&block); + let block_root = get_block_header_root(block_header); // Disallow blocks that conflict with the anchor (weak subjectivity checkpoint), if any. check_block_against_anchor_slot(block.message(), chain)?; @@ -838,10 +842,11 @@ impl GossipVerifiedBlock { &fork_choice_read_lock, block, )?; - drop(fork_choice_read_lock); let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); - let (parent_block, block) = verify_parent_block_is_known(block_root, chain, block)?; + let (parent_block, block) = + verify_parent_block_is_known::(block_root, &fork_choice_read_lock, block)?; + drop(fork_choice_read_lock); // Track the number of skip slots between the block and its parent. metrics::set_gauge( @@ -1770,19 +1775,28 @@ pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { block_root } +/// Returns the canonical root of the given `block_header`. +/// +/// Use this function to ensure that we report the block hashing time Prometheus metric. +pub fn get_block_header_root(block_header: &SignedBeaconBlockHeader) -> Hash256 { + let block_root_timer = metrics::start_timer(&metrics::BLOCK_HEADER_PROCESSING_BLOCK_ROOT); + + let block_root = block_header.message.canonical_root(); + + metrics::stop_timer(block_root_timer); + + block_root +} + /// Verify the parent of `block` is known, returning some information about the parent block from /// fork choice. 
#[allow(clippy::type_complexity)] fn verify_parent_block_is_known( block_root: Hash256, - chain: &BeaconChain, + fork_choice_read_lock: &RwLockReadGuard>, block: Arc>, ) -> Result<(ProtoBlock, Arc>), BlockError> { - if let Some(proto_block) = chain - .canonical_head - .fork_choice_read_lock() - .get_block(&block.parent_root()) - { + if let Some(proto_block) = fork_choice_read_lock.get_block(&block.parent_root()) { Ok((proto_block, block)) } else { Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs( diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index fbd255126e..00f5b04d2a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -786,6 +786,7 @@ where validator_monitor.process_valid_state( slot.epoch(TEthSpec::slots_per_epoch()), &head_snapshot.beacon_state, + &self.spec, ); } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index eff8d1d9d0..ad0e812cf2 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -17,6 +17,7 @@ use slog::{debug, error, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; +use std::num::NonZeroUsize; use std::sync::Arc; use task_executor::TaskExecutor; use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments}; @@ -32,15 +33,17 @@ mod processing_cache; mod state_lru_cache; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; +use types::non_zero_usize::new_non_zero_usize; /// The LRU Cache stores `PendingComponents` which can store up to /// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So /// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this /// to 1024 means the maximum size of the cache is ~ 0.8 GB. 
But the cache /// will target a size of less than 75% of capacity. -pub const OVERFLOW_LRU_CAPACITY: usize = 1024; +pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(1024); /// Until tree-states is implemented, we can't store very many states in memory :( -pub const STATE_LRU_CAPACITY: usize = 2; +pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(2); +pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get(); /// This includes a cache for any blocks or blobs that have been received over gossip or RPC /// and are awaiting more components before they can be imported. Additionally the diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 290be988c3..8b8368022a 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -42,6 +42,7 @@ use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use ssz_types::{FixedVector, VariableList}; +use std::num::NonZeroUsize; use std::{collections::HashSet, sync::Arc}; use types::blob_sidecar::BlobIdentifier; use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256}; @@ -288,7 +289,7 @@ struct Critical { } impl Critical { - pub fn new(capacity: usize) -> Self { + pub fn new(capacity: NonZeroUsize) -> Self { Self { in_memory: LruCache::new(capacity), store_keys: HashSet::new(), @@ -329,7 +330,7 @@ impl Critical { pending_components: PendingComponents, overflow_store: &OverflowStore, ) -> Result<(), AvailabilityCheckError> { - if self.in_memory.len() == self.in_memory.cap() { + if self.in_memory.len() == self.in_memory.cap().get() { // cache will overflow, must write lru entry to disk if let Some((lru_key, lru_value)) = self.in_memory.pop_lru() { 
overflow_store.persist_pending_components(lru_key, lru_value)?; @@ -377,12 +378,12 @@ pub struct OverflowLRUCache { /// Mutex to guard maintenance methods which move data between disk and memory maintenance_lock: Mutex<()>, /// The capacity of the LRU cache - capacity: usize, + capacity: NonZeroUsize, } impl OverflowLRUCache { pub fn new( - capacity: usize, + capacity: NonZeroUsize, beacon_store: BeaconStore, spec: ChainSpec, ) -> Result { @@ -514,7 +515,7 @@ impl OverflowLRUCache { /// maintain the cache pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> { // ensure memory usage is below threshold - let threshold = self.capacity * 3 / 4; + let threshold = self.capacity.get() * 3 / 4; self.maintain_threshold(threshold, cutoff_epoch)?; // clean up any keys on the disk that shouldn't be there self.prune_disk(cutoff_epoch)?; @@ -753,6 +754,7 @@ mod test { use std::ops::AddAssign; use store::{HotColdDB, ItemStore, LevelDB, StoreConfig}; use tempfile::{tempdir, TempDir}; + use types::non_zero_usize::new_non_zero_usize; use types::{ChainSpec, ExecPayload, MinimalEthSpec}; const LOW_VALIDATOR_COUNT: usize = 32; @@ -974,8 +976,9 @@ mod test { let harness = get_deneb_chain(log.clone(), &chain_db_path).await; let spec = harness.spec.clone(); let test_store = harness.chain.store.clone(); + let capacity_non_zero = new_non_zero_usize(capacity); let cache = Arc::new( - OverflowLRUCache::::new(capacity, test_store, spec.clone()) + OverflowLRUCache::::new(capacity_non_zero, test_store, spec.clone()) .expect("should create cache"), ); (harness, cache, chain_db_path) @@ -1477,7 +1480,7 @@ mod test { // create a new cache with the same store let recovered_cache = OverflowLRUCache::::new( - capacity, + new_non_zero_usize(capacity), harness.chain.store.clone(), harness.chain.spec.clone(), ) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs 
b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index d3348b67fb..bd125a7f42 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -1,7 +1,7 @@ use crate::block_verification_types::AsBlock; use crate::{ block_verification_types::BlockImportData, - data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY}, + data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY_NON_ZERO}, eth1_finalization_cache::Eth1FinalizationData, AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, }; @@ -61,7 +61,7 @@ pub struct StateLRUCache { impl StateLRUCache { pub fn new(store: BeaconStore, spec: ChainSpec) -> Self { Self { - states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY)), + states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY_NON_ZERO)), store, spec, } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 8edb7b4fc8..4ad98a50eb 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,4 +1,5 @@ pub mod attestation_rewards; +pub mod attestation_simulator; pub mod attestation_verification; mod attester_cache; pub mod beacon_block_reward; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index ca04366b01..ad095b37b5 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -10,6 +10,20 @@ use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; /// The maximum time to wait for the snapshot cache lock during a metrics scrape. 
const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100); +// Attestation simulator metrics +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL: &str = + "validator_monitor_attestation_simulator_head_attester_hit_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL: &str = + "validator_monitor_attestation_simulator_head_attester_miss_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL: &str = + "validator_monitor_attestation_simulator_target_attester_hit_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL: &str = + "validator_monitor_attestation_simulator_target_attester_miss_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL: &str = + "validator_monitor_attestation_simulator_source_attester_hit_total"; +pub const VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL: &str = + "validator_monitor_attestation_simulator_source_attester_miss_total"; + lazy_static! { /* * Block Processing @@ -40,6 +54,10 @@ lazy_static! { "beacon_block_processing_block_root_seconds", "Time spent calculating the block root when processing a block." ); + pub static ref BLOCK_HEADER_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( + "beacon_block_header_processing_block_root_seconds", + "Time spent calculating the block root for a beacon block header." + ); pub static ref BLOCK_PROCESSING_BLOB_ROOT: Result = try_create_histogram( "beacon_block_processing_blob_root_seconds", "Time spent calculating the blob root when processing a block." @@ -1041,6 +1059,48 @@ lazy_static! 
{ "beacon_aggregated_attestation_subsets_total", "Count of new aggregated attestations that are subsets of already known aggregates" ); + /* + * Attestation simulator metrics + */ + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot head attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, + "Incremented if a validator is not flagged as a previous slot head attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot target attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, + "Incremented if a validator is not flagged as a previous slot target attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, + "Incremented if a validator is flagged as a previous slot source attester \ + during per slot processing", + ); + pub static ref VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS: Result = + try_create_int_counter( + VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, + "Incremented if a validator is not flagged as a previous slot source attester \ + during per slot processing", + ); + /* + * Missed block metrics + */ 
pub static ref VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL: Result = try_create_int_counter_vec( "validator_monitor_missed_blocks_total", "Number of non-finalized blocks missed", diff --git a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs index 4f84961449..488faf12ac 100644 --- a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs @@ -3,9 +3,10 @@ //! Only `BlobSidecar`s that have completed proposer signature verification can be added //! to this cache to reduce DoS risks. +use crate::observed_block_producers::{ProposalKey, SeenBlock}; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{BlobSidecar, EthSpec, Slot}; +use types::{BlobSidecar, EthSpec, Hash256, Slot}; #[derive(Debug, PartialEq)] pub enum Error { @@ -29,7 +30,7 @@ pub enum Error { pub struct ObservedBlobSidecars { finalized_slot: Slot, /// Stores all received blob indices for a given `(ValidatorIndex, Slot)` tuple. - items: HashMap<(u64, Slot), HashSet>, + items: HashMap, HashSet)>, _phantom: PhantomData, } @@ -50,27 +51,74 @@ impl ObservedBlobSidecars { /// /// The supplied `blob_sidecar` **MUST** have completed proposer signature verification. 
pub fn observe_sidecar(&mut self, blob_sidecar: &BlobSidecar) -> Result { + let block_root = blob_sidecar.block_root(); self.sanitize_blob_sidecar(blob_sidecar)?; - let did_not_exist = self + let (blob_indices, block_roots) = self .items - .entry((blob_sidecar.block_proposer_index(), blob_sidecar.slot())) - .or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block())) - .insert(blob_sidecar.index); + .entry(ProposalKey { + slot: blob_sidecar.slot(), + proposer: blob_sidecar.block_proposer_index(), + }) + .or_insert_with(|| { + ( + HashSet::with_capacity(T::max_blobs_per_block()), + HashSet::new(), + ) + }); + let did_not_exist = blob_indices.insert(blob_sidecar.index); + block_roots.insert(block_root); Ok(!did_not_exist) } /// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window. - pub fn is_known(&self, blob_sidecar: &BlobSidecar) -> Result { + pub fn proposer_is_known(&self, blob_sidecar: &BlobSidecar) -> Result { self.sanitize_blob_sidecar(blob_sidecar)?; let is_known = self .items - .get(&(blob_sidecar.block_proposer_index(), blob_sidecar.slot())) - .map_or(false, |set| set.contains(&blob_sidecar.index)); + .get(&ProposalKey { + slot: blob_sidecar.slot(), + proposer: blob_sidecar.block_proposer_index(), + }) + .map_or(false, |(blob_indices, _block_roots)| { + blob_indices.contains(&blob_sidecar.index) + }); Ok(is_known) } + /// Returns `Ok(true)` if the `block_root` has been observed in a blob sidecar message before, `Ok(false)` if not. + /// Does not update the cache, so calling this function multiple times will continue to return + /// `Ok(false)`, until `Self::observe_proposer` is called. + /// + /// ## Errors + /// + /// - `key.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`. + /// - `key.slot` is equal to or less than the latest pruned `finalized_slot`. 
+ pub fn proposer_has_been_observed( + &self, + slot: Slot, + proposer: u64, + block_root: Hash256, + ) -> Result { + let key = ProposalKey { slot, proposer }; + if let Some((_, block_roots)) = self.items.get(&key) { + let block_already_known = block_roots.contains(&block_root); + let no_prev_known_blocks = + block_roots.difference(&HashSet::from([block_root])).count() == 0; + + if !no_prev_known_blocks { + Ok(SeenBlock::Slashable) + } else if block_already_known { + Ok(SeenBlock::Duplicate) + } else { + Ok(SeenBlock::UniqueNonSlashable) + } + } else { + Ok(SeenBlock::UniqueNonSlashable) + } + } + fn sanitize_blob_sidecar(&self, blob_sidecar: &BlobSidecar) -> Result<(), Error> { if blob_sidecar.index >= T::max_blobs_per_block() as u64 { return Err(Error::InvalidBlobIndex(blob_sidecar.index)); @@ -93,7 +141,7 @@ impl ObservedBlobSidecars { } self.finalized_slot = finalized_slot; - self.items.retain(|k, _| k.1 > finalized_slot); + self.items.retain(|k, _| k.slot > finalized_slot); } } @@ -140,14 +188,20 @@ mod tests { 1, "only one (validator_index, slot) tuple should be present" ); + + let (cached_blob_indices, cached_block_roots) = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_a, Slot::new(0))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 1, - "only one item should be present" + "only one proposer should be present" + ); + assert_eq!( + cached_block_roots.len(), + 1, + "only one block root should be present" ); /* @@ -158,14 +212,19 @@ mod tests { assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let (cached_blob_indices, cached_block_roots) = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_a, Slot::new(0))) - 
.expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 1, - "only one item should be present" + "only one proposer should be present" + ); + assert_eq!( + cached_block_roots.len(), + 1, + "only one block root should be present" ); /* @@ -215,15 +274,20 @@ mod tests { ); assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let (cached_blob_indices, cached_block_roots) = cache + .items + .get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs))) + .expect("the three epochs slot should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_b, Slot::new(three_epochs))) - .expect("the three epochs slot should be present") - .len(), + cached_blob_indices.len(), 1, "only one proposer should be present" ); + assert_eq!( + cached_block_roots.len(), + 1, + "only one block root should be present" + ); /* * Check that a prune doesnt wipe later blocks @@ -239,15 +303,20 @@ mod tests { ); assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let (cached_blob_indices, cached_block_roots) = cache + .items + .get(&ProposalKey::new(proposer_index_b, Slot::new(three_epochs))) + .expect("the three epochs slot should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_b, Slot::new(three_epochs))) - .expect("the three epochs slot should be present") - .len(), + cached_blob_indices.len(), 1, "only one proposer should be present" ); + assert_eq!( + cached_block_roots.len(), + 1, + "only one block root should be present" + ); } #[test] @@ -259,7 +328,7 @@ mod tests { let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0); assert_eq!( - cache.is_known(&sidecar_a), + cache.proposer_is_known(&sidecar_a), Ok(false), "no observation in empty cache" ); @@ -271,7 +340,7 @@ mod tests { ); assert_eq!( - cache.is_known(&sidecar_a), + cache.proposer_is_known(&sidecar_a), Ok(true), "observed block is indicated as true" ); @@ -284,15 +353,20 @@ mod tests { assert_eq!(cache.finalized_slot, 0, "finalized slot 
is zero"); assert_eq!(cache.items.len(), 1, "only one slot should be present"); + let (cached_blob_indices, cached_block_roots) = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_a, Slot::new(0))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 1, "only one proposer should be present" ); + assert_eq!( + cached_block_roots.len(), + 1, + "only one block root should be present" + ); // Slot 1, proposer 0 @@ -300,7 +374,7 @@ mod tests { let sidecar_b = get_blob_sidecar(1, proposer_index_b, 0); assert_eq!( - cache.is_known(&sidecar_b), + cache.proposer_is_known(&sidecar_b), Ok(false), "no observation for new slot" ); @@ -310,7 +384,7 @@ mod tests { "can observe proposer for new slot, indicates proposer unobserved" ); assert_eq!( - cache.is_known(&sidecar_b), + cache.proposer_is_known(&sidecar_b), Ok(true), "observed block in slot 1 is indicated as true" ); @@ -322,30 +396,40 @@ mod tests { assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); assert_eq!(cache.items.len(), 2, "two slots should be present"); + let (cached_blob_indices, cached_block_roots) = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_a, Slot::new(0))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 1, "only one proposer should be present in slot 0" ); assert_eq!( - cache - .items - .get(&(proposer_index_b, Slot::new(1))) - .expect("slot zero should be present") - .len(), + cached_block_roots.len(), + 1, + "only one block root should be present in slot 0" + ); + let (cached_blob_indices, cached_block_roots) = cache + .items + .get(&ProposalKey::new(proposer_index_b, Slot::new(1))) + .expect("slot zero should be present"); + assert_eq!( + cached_blob_indices.len(), 1, "only one proposer should be 
present in slot 1" ); + assert_eq!( + cached_block_roots.len(), + 1, + "only one block root should be present in slot 1" + ); // Slot 0, index 1 let sidecar_c = get_blob_sidecar(0, proposer_index_a, 1); assert_eq!( - cache.is_known(&sidecar_c), + cache.proposer_is_known(&sidecar_c), Ok(false), "no observation for new index" ); @@ -355,7 +439,7 @@ mod tests { "can observe new index, indicates sidecar unobserved for new index" ); assert_eq!( - cache.is_known(&sidecar_c), + cache.proposer_is_known(&sidecar_c), Ok(true), "observed new sidecar is indicated as true" ); @@ -367,15 +451,57 @@ mod tests { assert_eq!(cache.finalized_slot, 0, "finalized slot is zero"); assert_eq!(cache.items.len(), 2, "two slots should be present"); + let (cached_blob_indices, cached_block_roots) = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); assert_eq!( - cache - .items - .get(&(proposer_index_a, Slot::new(0))) - .expect("slot zero should be present") - .len(), + cached_blob_indices.len(), 2, "two blob indices should be present in slot 0" ); + // Changing the blob index doesn't change the block root, so only one unique signed + // header should be in the cache. + assert_eq!( + cached_block_roots.len(), + 1, + "one block root should be present in slot 0" + ); + + // Create a sidecar sharing slot and proposer but with a different block root. 
+ let mut sidecar_d: BlobSidecar = BlobSidecar { + index: sidecar_c.index, + blob: sidecar_c.blob.clone(), + kzg_commitment: sidecar_c.kzg_commitment, + kzg_proof: sidecar_c.kzg_proof, + signed_block_header: sidecar_c.signed_block_header.clone(), + kzg_commitment_inclusion_proof: sidecar_c.kzg_commitment_inclusion_proof.clone(), + }; + sidecar_d.signed_block_header.message.body_root = Hash256::repeat_byte(7); + assert_eq!( + cache.proposer_is_known(&sidecar_d), + Ok(true), + "there has been an observation for this proposer index" + ); + assert_eq!( + cache.observe_sidecar(&sidecar_d), + Ok(true), + "indicates sidecar proposer was observed" + ); + let (cached_blob_indices, cached_block_roots) = cache + .items + .get(&ProposalKey::new(proposer_index_a, Slot::new(0))) + .expect("slot zero should be present"); + assert_eq!( + cached_blob_indices.len(), + 2, + "two blob indices should be present in slot 0" + ); + assert_eq!( + cached_block_roots.len(), + 2, + "two block root should be present in slot 0" + ); // Try adding an out of bounds index let invalid_index = E::max_blobs_per_block() as u64; diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index f76fc53796..096c8bff77 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -16,9 +16,15 @@ pub enum Error { } #[derive(Eq, Hash, PartialEq, Debug, Default)] -struct ProposalKey { - slot: Slot, - proposer: u64, +pub struct ProposalKey { + pub slot: Slot, + pub proposer: u64, +} + +impl ProposalKey { + pub fn new(proposer: u64, slot: Slot) -> Self { + Self { slot, proposer } + } } /// Maintains a cache of observed `(block.slot, block.proposer)`. 
diff --git a/beacon_node/beacon_chain/src/pre_finalization_cache.rs b/beacon_node/beacon_chain/src/pre_finalization_cache.rs index 112394bb18..22b76e026c 100644 --- a/beacon_node/beacon_chain/src/pre_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/pre_finalization_cache.rs @@ -3,11 +3,13 @@ use itertools::process_results; use lru::LruCache; use parking_lot::Mutex; use slog::debug; +use std::num::NonZeroUsize; use std::time::Duration; +use types::non_zero_usize::new_non_zero_usize; use types::Hash256; -const BLOCK_ROOT_CACHE_LIMIT: usize = 512; -const LOOKUP_LIMIT: usize = 8; +const BLOCK_ROOT_CACHE_LIMIT: NonZeroUsize = new_non_zero_usize(512); +const LOOKUP_LIMIT: NonZeroUsize = new_non_zero_usize(8); const METRICS_TIMEOUT: Duration = Duration::from_millis(100); /// Cache for rejecting attestations to blocks from before finalization. @@ -78,7 +80,7 @@ impl BeaconChain { // 3. Check the network with a single block lookup. cache.in_progress_lookups.put(block_root, ()); - if cache.in_progress_lookups.len() == LOOKUP_LIMIT { + if cache.in_progress_lookups.len() == LOOKUP_LIMIT.get() { // NOTE: we expect this to occur sometimes if a lot of blocks that we look up fail to be // imported for reasons other than being pre-finalization. 
The cache will eventually // self-repair in this case by replacing old entries with new ones until all the failed diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 8cea9c0769..8d82a0c06b 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -10,6 +10,7 @@ use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use smallvec::SmallVec; +use state_processing::common::get_attestation_participation_flag_indices; use state_processing::per_epoch_processing::{ errors::EpochProcessingError, EpochProcessingSummary, }; @@ -21,8 +22,11 @@ use std::str::Utf8Error; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::AbstractExecPayload; +use types::consts::altair::{ + TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, +}; use types::{ - AttesterSlashing, BeaconBlockRef, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, + Attestation, AttesterSlashing, BeaconBlockRef, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, }; @@ -69,6 +73,15 @@ impl Default for ValidatorMonitorConfig { } } +/// The goal is to check the behaviour of the BN if it pretends to attest at each slot +/// Check the head/target/source once the state.slot is some slots beyond attestation.data.slot +/// to defend against re-orgs. 16 slots is the minimum to defend against re-orgs of up to 16 slots. +pub const UNAGGREGATED_ATTESTATION_LAG_SLOTS: usize = 16; + +/// Bound the storage size of simulated attestations. The head state can only verify attestations +/// from the current and previous epoch. 
+pub const MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH: usize = 64; + #[derive(Debug)] pub enum Error { InvalidPubkey(String), @@ -370,7 +383,7 @@ struct MissedBlock { /// /// The intention of this struct is to provide users with more logging and Prometheus metrics around /// validators that they are interested in. -pub struct ValidatorMonitor { +pub struct ValidatorMonitor { /// The validators that require additional monitoring. validators: HashMap, /// A map of validator index (state.validators) to a validator public key. @@ -386,6 +399,8 @@ pub struct ValidatorMonitor { missed_blocks: HashSet, // A beacon proposer cache beacon_proposer_cache: Arc>, + // Unaggregated attestations generated by the committee index at each slot. + unaggregated_attestations: HashMap>, log: Logger, _phantom: PhantomData, } @@ -409,6 +424,7 @@ impl ValidatorMonitor { individual_tracking_threshold, missed_blocks: <_>::default(), beacon_proposer_cache, + unaggregated_attestations: <_>::default(), log, _phantom: PhantomData, }; @@ -426,7 +442,7 @@ impl ValidatorMonitor { } /// Add some validators to `self` for additional monitoring. 
- fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) { + pub fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) { let index_opt = self .indices .iter() @@ -444,9 +460,32 @@ impl ValidatorMonitor { }); } + /// Add an unaggregated attestation + pub fn set_unaggregated_attestation(&mut self, attestation: Attestation) { + let unaggregated_attestations = &mut self.unaggregated_attestations; + + // Pruning, this removes the oldest key/pair of the hashmap if it's greater than MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH + if unaggregated_attestations.len() >= MAX_UNAGGREGATED_ATTESTATION_HASHMAP_LENGTH { + if let Some(oldest_slot) = unaggregated_attestations.keys().min().copied() { + unaggregated_attestations.remove(&oldest_slot); + } + } + let slot = attestation.data.slot; + self.unaggregated_attestations.insert(slot, attestation); + } + + pub fn get_unaggregated_attestation(&self, slot: Slot) -> Option<&Attestation> { + self.unaggregated_attestations.get(&slot) + } + /// Reads information from the given `state`. The `state` *must* be valid (i.e, able to be /// imported). - pub fn process_valid_state(&mut self, current_epoch: Epoch, state: &BeaconState) { + pub fn process_valid_state( + &mut self, + current_epoch: Epoch, + state: &BeaconState, + spec: &ChainSpec, + ) { // Add any new validator indices. state .validators() @@ -463,6 +502,7 @@ impl ValidatorMonitor { // Add missed non-finalized blocks for the monitored validators self.add_validators_missed_blocks(state); + self.process_unaggregated_attestations(state, spec); // Update metrics for individual validators. 
for monitored_validator in self.validators.values() { @@ -562,8 +602,10 @@ impl ValidatorMonitor { let end_slot = current_slot.saturating_sub(MISSED_BLOCK_LAG_SLOTS).as_u64(); - // List of proposers per epoch from the beacon_proposer_cache - let mut proposers_per_epoch: Option> = None; + // List of proposers per epoch from the beacon_proposer_cache, and the epoch at which the + // cache is valid. + let mut proposers_per_epoch: Option<(SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>, Epoch)> = + None; for (prev_slot, slot) in (start_slot.as_u64()..=end_slot) .map(Slot::new) @@ -577,25 +619,30 @@ impl ValidatorMonitor { // Found missed block if block_root == prev_block_root { let slot_epoch = slot.epoch(T::slots_per_epoch()); - let prev_slot_epoch = prev_slot.epoch(T::slots_per_epoch()); if let Ok(shuffling_decision_block) = state.proposer_shuffling_decision_root_at_epoch(slot_epoch, *block_root) { - // Only update the cache if it needs to be initialised or because - // slot is at epoch + 1 - if proposers_per_epoch.is_none() || slot_epoch != prev_slot_epoch { - proposers_per_epoch = self.get_proposers_by_epoch_from_cache( - slot_epoch, - shuffling_decision_block, - ); + // Update the cache if it has not yet been initialised, or if it is + // initialised for a prior epoch. This is an optimisation to avoid bouncing + // the proposer shuffling cache lock when there are lots of missed blocks. 
+ if proposers_per_epoch + .as_ref() + .map_or(true, |(_, cached_epoch)| *cached_epoch != slot_epoch) + { + proposers_per_epoch = self + .get_proposers_by_epoch_from_cache( + slot_epoch, + shuffling_decision_block, + ) + .map(|cache| (cache, slot_epoch)); } // Only add missed blocks for the proposer if it's in the list of monitored validators let slot_in_epoch = slot % T::slots_per_epoch(); if let Some(proposer_index) = proposers_per_epoch - .as_deref() - .and_then(|proposers| proposers.get(slot_in_epoch.as_usize())) + .as_ref() + .and_then(|(proposers, _)| proposers.get(slot_in_epoch.as_usize())) { let i = *proposer_index as u64; if let Some(pub_key) = self.indices.get(&i) { @@ -634,7 +681,8 @@ impl ValidatorMonitor { debug!( self.log, "Could not get proposers from cache"; - "epoch" => ?slot_epoch + "epoch" => ?slot_epoch, + "decision_root" => ?shuffling_decision_block, ); } } @@ -654,6 +702,107 @@ impl ValidatorMonitor { .cloned() } + /// Process the unaggregated attestations generated by the service `attestation_simulator_service` + /// and check if the attestation qualifies for a reward matching the flags source/target/head + fn process_unaggregated_attestations(&mut self, state: &BeaconState, spec: &ChainSpec) { + let current_slot = state.slot(); + + // Ensures that we process attestation when there have been skipped slots between blocks + let attested_slots: Vec<_> = self + .unaggregated_attestations + .keys() + .filter(|&&attestation_slot| { + attestation_slot + < current_slot - Slot::new(UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64) + }) + .cloned() + .collect(); + + let unaggregated_attestations = &mut self.unaggregated_attestations; + for slot in attested_slots { + if let Some(unaggregated_attestation) = unaggregated_attestations.remove(&slot) { + // Don't process this attestation, it's too old to be processed by this state. 
+ if slot.epoch(T::slots_per_epoch()) < state.previous_epoch() { + continue; + } + + // We are simulating that unaggregated attestation in a service that produces unaggregated attestations + // every slot, the inclusion_delay shouldn't matter here as long as the minimum value + // that qualifies the committee index for reward is included + let inclusion_delay = spec.min_attestation_inclusion_delay; + + // Get the reward indices for the unaggregated attestation or log an error + match get_attestation_participation_flag_indices( + state, + &unaggregated_attestation.data, + inclusion_delay, + spec, + ) { + Ok(flag_indices) => { + let head_hit = flag_indices.contains(&TIMELY_HEAD_FLAG_INDEX); + let target_hit = flag_indices.contains(&TIMELY_TARGET_FLAG_INDEX); + let source_hit = flag_indices.contains(&TIMELY_SOURCE_FLAG_INDEX); + + if head_hit { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT, + ); + } else { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS, + ); + } + if target_hit { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT, + ); + } else { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS, + ); + } + if source_hit { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT, + ); + } else { + metrics::inc_counter( + &metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS, + ); + } + + let data = &unaggregated_attestation.data; + debug!( + self.log, + "Simulated attestation evaluated"; + "attestation_source" => ?data.source.root, + "attestation_target" => ?data.target.root, + "attestation_head" => ?data.beacon_block_root, + "attestation_slot" => ?data.slot, + "source_hit" => source_hit, + "target_hit" => target_hit, + "head_hit" => head_hit, + ); + } + Err(err) => { + error!( + self.log, + "Failed to get attestation 
participation flag indices"; + "error" => ?err, + "unaggregated_attestation" => ?unaggregated_attestation, + ); + } + } + } else { + error!( + self.log, + "Failed to remove unaggregated attestation from the hashmap"; + "slot" => ?slot, + ); + } + } + } + /// Run `func` with the `TOTAL_LABEL` and optionally the /// `individual_id`. /// diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index fdc37b5529..ff83b25320 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -1,8 +1,10 @@ #![cfg(not(debug_assertions))] +use beacon_chain::attestation_simulator::produce_unaggregated_attestation; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; -use beacon_chain::{StateSkipConfig, WhenSlotSkipped}; +use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; +use beacon_chain::{metrics, StateSkipConfig, WhenSlotSkipped}; use lazy_static::lazy_static; use std::sync::Arc; use tree_hash::TreeHash; @@ -15,6 +17,91 @@ lazy_static! { static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); } +/// This test builds a chain that is testing the performance of the unaggregated attestations +/// produced by the attestation simulator service. 
+#[tokio::test] +async fn produces_attestations_from_attestation_simulator_service() { + // Produce 2 epochs, or 64 blocks + let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 2; + + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .keypairs(KEYPAIRS[..].to_vec()) + .fresh_ephemeral_store() + .mock_execution_layer() + .build(); + + let chain = &harness.chain; + + // Test all valid committee indices and their rewards for all slots in the chain + // using validator monitor + for slot in 0..=num_blocks_produced { + // We do not produce at slot=0, and there's no committe cache available anyway + if slot > 0 && slot <= num_blocks_produced { + harness.advance_slot(); + + harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + } + // Set the state to the current slot + let slot = Slot::from(slot); + let mut state = chain + .state_at_slot(slot, StateSkipConfig::WithStateRoots) + .expect("should get state"); + + // Prebuild the committee cache for the current epoch + state + .build_committee_cache(RelativeEpoch::Current, &harness.chain.spec) + .unwrap(); + + // Produce an unaggragetated attestation + produce_unaggregated_attestation(chain.clone(), chain.slot().unwrap()); + + // Verify that the ua is stored in validator monitor + let validator_monitor = chain.validator_monitor.read(); + validator_monitor + .get_unaggregated_attestation(slot) + .expect("should get unaggregated attestation"); + } + + // Compare the prometheus metrics that evaluates the performance of the unaggregated attestations + let hit_prometheus_metrics = vec![ + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, + ]; + let miss_prometheus_metrics = vec![ + 
metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, + metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, + ]; + + // Expected metrics count should only apply to hit metrics as miss metrics are never set, nor can be found + // when gathering prometheus metrics. If they are found, which should not, it will diff from 0 and fail the test + let expected_miss_metrics_count = 0; + let expected_hit_metrics_count = + num_blocks_produced - UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64; + lighthouse_metrics::gather().iter().for_each(|mf| { + if hit_prometheus_metrics.contains(&mf.get_name()) { + assert_eq!( + mf.get_metric()[0].get_counter().get_value() as u64, + expected_hit_metrics_count + ); + } + if miss_prometheus_metrics.contains(&mf.get_name()) { + assert_eq!( + mf.get_metric()[0].get_counter().get_value() as u64, + expected_miss_metrics_count + ); + } + }); +} + /// This test builds a chain that is just long enough to finalize an epoch then it produces an /// attestation at each slot from genesis through to three epochs past the head. /// diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 5bc6b758c2..d9ff57b1b0 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -1,9 +1,9 @@ -use lazy_static::lazy_static; - use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; +use lazy_static::lazy_static; +use logging::test_logger; use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; // Should ideally be divisible by 3. 
@@ -23,6 +23,7 @@ fn get_harness( let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .logger(test_logger()) .fresh_ephemeral_store() .mock_execution_layer() .validator_monitor_config(ValidatorMonitorConfig { @@ -39,6 +40,83 @@ fn get_harness( harness } +// Regression test for off-by-one caching issue in missed block detection. +#[tokio::test] +async fn missed_blocks_across_epochs() { + let slots_per_epoch = E::slots_per_epoch(); + let all_validators = (0..VALIDATOR_COUNT).collect::>(); + + let harness = get_harness(VALIDATOR_COUNT, vec![]); + let validator_monitor = &harness.chain.validator_monitor; + let mut genesis_state = harness.get_current_state(); + let genesis_state_root = genesis_state.update_tree_hash_cache().unwrap(); + let genesis_block_root = harness.head_block_root(); + + // Skip a slot in the first epoch (to prime the cache inside the missed block function) and then + // at a different offset in the 2nd epoch. The missed block in the 2nd epoch MUST NOT reuse + // the cache from the first epoch. + let first_skip_offset = 3; + let second_skip_offset = slots_per_epoch / 2; + assert_ne!(first_skip_offset, second_skip_offset); + let first_skip_slot = Slot::new(first_skip_offset); + let second_skip_slot = Slot::new(slots_per_epoch + second_skip_offset); + let slots = (1..2 * slots_per_epoch) + .map(Slot::new) + .filter(|slot| *slot != first_skip_slot && *slot != second_skip_slot) + .collect::>(); + + let (block_roots_by_slot, state_roots_by_slot, _, head_state) = harness + .add_attested_blocks_at_slots(genesis_state, genesis_state_root, &slots, &all_validators) + .await; + + // Prime the proposer shuffling cache. 
+ let mut proposer_shuffling_cache = harness.chain.beacon_proposer_cache.lock(); + for epoch in [0, 1].into_iter().map(Epoch::new) { + let start_slot = epoch.start_slot(slots_per_epoch) + 1; + let state = harness + .get_hot_state(state_roots_by_slot[&start_slot]) + .unwrap(); + let decision_root = state + .proposer_shuffling_decision_root(genesis_block_root) + .unwrap(); + proposer_shuffling_cache + .insert( + epoch, + decision_root, + state + .get_beacon_proposer_indices(&harness.chain.spec) + .unwrap(), + state.fork(), + ) + .unwrap(); + } + drop(proposer_shuffling_cache); + + // Monitor the validator that proposed the block at the same offset in the 0th epoch as the skip + // in the 1st epoch. + let innocent_proposer_slot = Slot::new(second_skip_offset); + let innocent_proposer = harness + .get_block(block_roots_by_slot[&innocent_proposer_slot]) + .unwrap() + .message() + .proposer_index(); + + let mut vm_write = validator_monitor.write(); + + // Call `process_` once to update validator indices. + vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec); + // Start monitoring the innocent validator. + vm_write.add_validator_pubkey(KEYPAIRS[innocent_proposer as usize].pk.compress()); + // Check for missed blocks. + vm_write.process_valid_state(head_state.current_epoch(), &head_state, &harness.chain.spec); + + // My client is innocent, your honour! 
+ assert_eq!( + vm_write.get_monitored_validator_missed_block_count(innocent_proposer), + 0 + ); +} + #[tokio::test] async fn produces_missed_blocks() { let validator_count = 16; @@ -110,7 +188,7 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor = harness1.chain.validator_monitor.write(); - validator_monitor.process_valid_state(nb_epoch_to_simulate, _state); + validator_monitor.process_valid_state(nb_epoch_to_simulate, _state, &harness1.chain.spec); // We should have one entry in the missed blocks map assert_eq!( @@ -193,7 +271,7 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor2 = harness2.chain.validator_monitor.write(); - validator_monitor2.process_valid_state(epoch, _state2); + validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We should have one entry in the missed blocks map assert_eq!( validator_monitor2.get_monitored_validator_missed_block_count(validator_index as u64), @@ -219,7 +297,7 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor - validator_monitor2.process_valid_state(epoch, _state2); + validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We shouldn't have any entry in the missed blocks map assert_ne!(validator_index, not_monitored_validator_index); @@ -288,7 +366,7 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor3 = harness3.chain.validator_monitor.write(); - validator_monitor3.process_valid_state(epoch, _state3); + validator_monitor3.process_valid_state(epoch, 
_state3, &harness3.chain.spec); // We shouldn't have one entry in the missed blocks map assert_eq!( diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 1c675d280f..79b46f90a5 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -106,7 +106,7 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024; /// before we start dropping them. const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024; -/// The maximum number of queued `SignedBlobSidecar` objects received on gossip that +/// The maximum number of queued `BlobSidecar` objects received on gossip that /// will be stored before we start dropping them. const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024; @@ -1304,7 +1304,7 @@ impl BeaconProcessor { ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL, - gossip_block_queue.len() as i64, + gossip_blob_queue.len() as i64, ); metrics::set_gauge( &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL, diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 9082a7d474..fa7d7d7b9a 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -49,7 +49,7 @@ lazy_static::lazy_static! { // Gossip blobs. pub static ref BEACON_PROCESSOR_GOSSIP_BLOB_QUEUE_TOTAL: Result = try_create_int_gauge( "beacon_processor_gossip_blob_queue_total", - "Count of blocks from gossip waiting to be verified." + "Count of blobs from gossip waiting to be verified." ); // Gossip Exits. 
pub static ref BEACON_PROCESSOR_EXIT_QUEUE_TOTAL: Result = try_create_int_gauge( diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index bfd55c3beb..c6c238fd5a 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -2,6 +2,7 @@ use crate::address_change_broadcast::broadcast_address_changes_at_capella; use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::notifier::spawn_notifier; use crate::Client; +use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; @@ -839,6 +840,10 @@ where runtime_context.executor.clone(), beacon_chain.clone(), ); + start_attestation_simulator_service( + beacon_chain.task_executor.clone(), + beacon_chain.clone(), + ); } Ok(Client { diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index 362f5b0b2b..bc8e4e3140 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -8,17 +8,19 @@ use crate::HttpJsonRpc; use lru::LruCache; use slog::{debug, error, info, warn, Logger}; use std::future::Future; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::{watch, Mutex, RwLock}; use tokio_stream::wrappers::WatchStream; +use types::non_zero_usize::new_non_zero_usize; use types::ExecutionBlockHash; /// The number of payload IDs that will be stored for each `Engine`. /// /// Since the size of each value is small (~800 bytes) a large number is used for safety. 
-const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512; +const PAYLOAD_ID_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(512); const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes /// Stores the remembered state of a engine. diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 6b0277ff31..6841f4d50e 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -29,6 +29,7 @@ use std::collections::HashMap; use std::fmt; use std::future::Future; use std::io::Write; +use std::num::NonZeroUsize; use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; @@ -42,6 +43,7 @@ use tokio_stream::wrappers::WatchStream; use tree_hash::TreeHash; use types::beacon_block_body::KzgCommitments; use types::builder_bid::BuilderBid; +use types::non_zero_usize::new_non_zero_usize; use types::payload::BlockProductionVersion; use types::{ AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock, @@ -68,7 +70,7 @@ pub const DEFAULT_JWT_FILE: &str = "jwt.hex"; /// Each time the `ExecutionLayer` retrieves a block from an execution node, it stores that block /// in an LRU cache to avoid redundant lookups. This is the size of that cache. -const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128; +const EXECUTION_BLOCKS_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); /// A fee recipient address for use during block production. Only used as a very last resort if /// there is no address provided by the user. 
@@ -176,15 +178,26 @@ impl From>> for BlockProposalContents> { fn from(item: BlockProposalContents>) -> Self { - let block_value = item.block_value().to_owned(); - - let blinded_payload: BlockProposalContents> = + match item { BlockProposalContents::Payload { - payload: item.to_payload().execution_payload().into(), + payload, block_value, - }; - - blinded_payload + } => BlockProposalContents::Payload { + payload: payload.execution_payload().into(), + block_value, + }, + BlockProposalContents::PayloadAndBlobs { + payload, + block_value, + kzg_commitments, + blobs_and_proofs: _, + } => BlockProposalContents::PayloadAndBlobs { + payload: payload.execution_payload().into(), + block_value, + kzg_commitments, + blobs_and_proofs: None, + }, + } } } diff --git a/beacon_node/execution_layer/src/payload_cache.rs b/beacon_node/execution_layer/src/payload_cache.rs index 1155b1ca3a..1a2864c194 100644 --- a/beacon_node/execution_layer/src/payload_cache.rs +++ b/beacon_node/execution_layer/src/payload_cache.rs @@ -1,10 +1,12 @@ use eth2::types::FullPayloadContents; use lru::LruCache; use parking_lot::Mutex; +use std::num::NonZeroUsize; use tree_hash::TreeHash; +use types::non_zero_usize::new_non_zero_usize; use types::{EthSpec, Hash256}; -pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10; +pub const DEFAULT_PAYLOAD_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(10); /// A cache mapping execution payloads by tree hash roots. 
pub struct PayloadCache { diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 7da2022d58..3d4ea51f4b 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -335,8 +335,9 @@ pub fn serve( .el .get_payload_by_root(&root) .ok_or_else(|| reject("missing payload for tx root"))?; - let resp = ForkVersionedResponse { + let resp: ForkVersionedResponse<_> = ForkVersionedResponse { version: Some(fork_name), + metadata: Default::default(), data: payload, }; @@ -616,8 +617,9 @@ pub fn serve( .spec .fork_name_at_epoch(slot.epoch(E::slots_per_epoch())); let signed_bid = SignedBuilderBid { message, signature }; - let resp = ForkVersionedResponse { + let resp: ForkVersionedResponse<_> = ForkVersionedResponse { version: Some(fork_name), + metadata: Default::default(), data: signed_bid, }; let json_bid = serde_json::to_string(&resp) diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 299bc019c4..ad71e9e9d0 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -3,13 +3,13 @@ use eth2::lighthouse::{BlockReward, BlockRewardsQuery}; use lru::LruCache; use slog::{debug, warn, Logger}; use state_processing::BlockReplayer; +use std::num::NonZeroUsize; use std::sync::Arc; use types::beacon_block::BlindedBeaconBlock; -use warp_utils::reject::{ - beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error, -}; +use types::non_zero_usize::new_non_zero_usize; +use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; -const STATE_CACHE_SIZE: usize = 2; +const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2); /// Fetch block rewards for blocks from the canonical chain. 
pub fn get_block_rewards( @@ -164,11 +164,7 @@ pub fn compute_block_rewards( .build_all_committee_caches(&chain.spec) .map_err(beacon_state_error)?; - state_cache - .get_or_insert((parent_root, block.slot()), || state) - .ok_or_else(|| { - custom_server_error("LRU cache insert should always succeed".into()) - })? + state_cache.get_or_insert((parent_root, block.slot()), || state) }; // Compute block reward. diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 0b25015e26..1b4f1d35e1 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -77,12 +77,12 @@ use tokio_stream::{ StreamExt, }; use types::{ - Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, - CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256, - ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, - SyncContributionData, + fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, + AttesterSlashing, BeaconStateError, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, + ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, RelativeEpoch, + SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -2399,6 +2399,7 @@ pub fn serve( }), _ => Ok(warp::reply::json(&ForkVersionedResponse { version: Some(fork_name), + metadata: EmptyMetadata {}, data: bootstrap, }) .into_response()), @@ -2446,6 +2447,7 @@ pub fn serve( }), _ => Ok(warp::reply::json(&ForkVersionedResponse { version: Some(fork_name), + 
metadata: EmptyMetadata {}, data: update, }) .into_response()), @@ -2493,6 +2495,7 @@ pub fn serve( }), _ => Ok(warp::reply::json(&ForkVersionedResponse { version: Some(fork_name), + metadata: EmptyMetadata {}, data: update, }) .into_response()), @@ -3193,7 +3196,7 @@ pub fn serve( ); if endpoint_version == V3 { - produce_block_v3(endpoint_version, accept_header, chain, slot, query).await + produce_block_v3(accept_header, chain, slot, query).await } else { produce_block_v2(endpoint_version, accept_header, chain, slot, query).await } diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 09b95136b5..ff1c7d345c 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -1,17 +1,3 @@ -use bytes::Bytes; -use std::sync::Arc; -use types::{payload::BlockProductionVersion, *}; - -use beacon_chain::{ - BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, -}; -use eth2::types::{self as api_types, EndpointVersion, SkipRandaoVerification}; -use ssz::Encode; -use warp::{ - hyper::{Body, Response}, - Reply, -}; - use crate::{ build_block_contents, version::{ @@ -20,6 +6,20 @@ use crate::{ fork_versioned_response, inconsistent_fork_rejection, }, }; +use beacon_chain::{ + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, +}; +use bytes::Bytes; +use eth2::types::{ + self as api_types, EndpointVersion, ProduceBlockV3Metadata, SkipRandaoVerification, +}; +use ssz::Encode; +use std::sync::Arc; +use types::{payload::BlockProductionVersion, *}; +use warp::{ + hyper::{Body, Response}, + Reply, +}; pub fn get_randao_verification( query: &api_types::ValidatorBlocksQuery, @@ -40,7 +40,6 @@ pub fn get_randao_verification( } pub async fn produce_block_v3( - endpoint_version: EndpointVersion, accept_header: Option, chain: Arc>, slot: Slot, @@ -68,13 +67,12 @@ pub async fn produce_block_v3( 
warp_utils::reject::custom_bad_request(format!("failed to fetch a block: {:?}", e)) })?; - build_response_v3(chain, block_response_type, endpoint_version, accept_header) + build_response_v3(chain, block_response_type, accept_header) } pub fn build_response_v3( chain: Arc>, block_response: BeaconBlockResponseWrapper, - endpoint_version: EndpointVersion, accept_header: Option, ) -> Result, warp::Rejection> { let fork_name = block_response @@ -84,6 +82,13 @@ pub fn build_response_v3( let consensus_block_value = block_response.consensus_block_value(); let execution_payload_blinded = block_response.is_blinded(); + let metadata = ProduceBlockV3Metadata { + consensus_version: fork_name, + execution_payload_blinded, + execution_payload_value, + consensus_block_value, + }; + let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?; match accept_header { @@ -100,12 +105,17 @@ pub fn build_response_v3( .map_err(|e| -> warp::Rejection { warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) }), - _ => fork_versioned_response(endpoint_version, fork_name, block_contents) - .map(|response| warp::reply::json(&response).into_response()) - .map(|res| add_consensus_version_header(res, fork_name)) - .map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded)) - .map(|res| add_execution_payload_value_header(res, execution_payload_value)) - .map(|res| add_consensus_block_value_header(res, consensus_block_value)), + _ => Ok(warp::reply::json(&ForkVersionedResponse { + version: Some(fork_name), + metadata, + data: block_contents, + }) + .into_response()) + .map(|res| res.into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) + .map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded)) + .map(|res| add_execution_payload_value_header(res, execution_payload_value)) + .map(|res| add_consensus_block_value_header(res, consensus_block_value)), } } diff --git 
a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 432d91b723..92d302f56d 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -113,7 +113,10 @@ pub async fn publish_block b, - Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown)) => { + Err(BlockContentsError::BlockError(BlockError::BlockIsAlreadyKnown)) + | Err(BlockContentsError::BlobError( + beacon_chain::blob_verification::GossipBlobError::RepeatBlob { .. }, + )) => { // Allow the status code for duplicate blocks to be overridden based on config. return Ok(warp::reply::with_status( warp::reply::json(&ErrorMessage { @@ -185,6 +188,23 @@ pub async fn publish_block block_clone.slot() ); Err(BlockError::Slashable) + } else if chain_clone + .observed_blob_sidecars + .read() + .proposer_has_been_observed( + block_clone.slot(), + block_clone.message().proposer_index(), + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))? 
+ .is_slashable() + { + warn!( + log_clone, + "Not publishing equivocating blob"; + "slot" => block_clone.slot() + ); + Err(BlockError::Slashable) } else { publish_block( block_clone, diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 7b06901243..343defb809 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,11 +1,15 @@ -use crate::api_types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; use crate::api_types::EndpointVersion; use eth2::{ CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, }; use serde::Serialize; -use types::{ForkName, ForkVersionedResponse, InconsistentFork, Uint256}; +use types::{ + fork_versioned_response::{ + ExecutionOptimisticFinalizedForkVersionedResponse, ExecutionOptimisticFinalizedMetadata, + }, + ForkName, ForkVersionedResponse, InconsistentFork, Uint256, +}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); @@ -26,6 +30,7 @@ pub fn fork_versioned_response( }; Ok(ForkVersionedResponse { version: fork_name, + metadata: Default::default(), data, }) } @@ -46,8 +51,10 @@ pub fn execution_optimistic_finalized_fork_versioned_response( }; Ok(ExecutionOptimisticFinalizedForkVersionedResponse { version: fork_name, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), + metadata: ExecutionOptimisticFinalizedMetadata { + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, data, }) } @@ -73,12 +80,12 @@ pub fn add_execution_payload_blinded_header( /// Add the `Eth-Execution-Payload-Value` header to a response. 
pub fn add_execution_payload_value_header( reply: T, - execution_payload_value: Option, + execution_payload_value: Uint256, ) -> Response { reply::with_header( reply, EXECUTION_PAYLOAD_VALUE_HEADER, - execution_payload_value.unwrap_or_default().to_string(), + execution_payload_value.to_string(), ) .into_response() } @@ -86,12 +93,12 @@ pub fn add_execution_payload_value_header( /// Add the `Eth-Consensus-Block-Value` header to a response. pub fn add_consensus_block_value_header( reply: T, - consensus_payload_value: Option, + consensus_payload_value: u64, ) -> Response { reply::with_header( reply, CONSENSUS_BLOCK_VALUE_HEADER, - consensus_payload_value.unwrap_or_default().to_string(), + consensus_payload_value.to_string(), ) .into_response() } diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 48a2f450e2..945f538a3a 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -4,6 +4,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, ChainConfig, }; +use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; use http_api::test_utils::InteractiveTester; @@ -21,8 +22,6 @@ use types::{ MinimalEthSpec, ProposerPreparationData, Slot, }; -use eth2::types::ForkVersionedBeaconBlockType::{Blinded, Full}; - type E = MainnetEthSpec; // Test that the deposit_contract endpoint returns the correct chain_id and address. 
@@ -113,8 +112,8 @@ async fn state_by_root_pruned_from_fork_choice() { .unwrap() .unwrap(); - assert!(response.finalized.unwrap()); - assert!(!response.execution_optimistic.unwrap()); + assert!(response.metadata.finalized.unwrap()); + assert!(!response.metadata.execution_optimistic.unwrap()); let mut state = response.data; assert_eq!(state.update_tree_hash_cache().unwrap(), state_root); @@ -619,15 +618,17 @@ pub async fn proposer_boost_re_org_test( let randao_reveal = harness .sign_randao_reveal(&state_b, proposer_index, slot_c) .into(); - let unsigned_block_type = tester + let (unsigned_block_type, _) = tester .client .get_validator_blocks_v3::(slot_c, &randao_reveal, None) .await .unwrap(); - let (unsigned_block_c, block_c_blobs) = match unsigned_block_type { - Full(unsigned_block_contents_c) => unsigned_block_contents_c.data.deconstruct(), - Blinded(_) => { + let (unsigned_block_c, block_c_blobs) = match unsigned_block_type.data { + ProduceBlockV3Response::Full(unsigned_block_contents_c) => { + unsigned_block_contents_c.deconstruct() + } + ProduceBlockV3Response::Blinded(_) => { panic!("Should not be a blinded block"); } }; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index ebd681b59f..1b694ecf9d 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -7,7 +7,9 @@ use environment::null_logger; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, - types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *}, + types::{ + BlockId as CoreBlockId, ForkChoiceNode, ProduceBlockV3Response, StateId as CoreStateId, *, + }, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; use execution_layer::test_utils::{ @@ -38,8 +40,6 @@ use types::{ MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, }; -use eth2::types::ForkVersionedBeaconBlockType::{Blinded, Full}; - type E = MainnetEthSpec; const SECONDS_PER_SLOT: u64 = 12; @@ 
-655,6 +655,7 @@ impl ApiTester { .await .unwrap() .unwrap() + .metadata .finalized .unwrap(); @@ -691,6 +692,7 @@ impl ApiTester { .await .unwrap() .unwrap() + .metadata .finalized .unwrap(); @@ -728,6 +730,7 @@ impl ApiTester { .await .unwrap() .unwrap() + .metadata .finalized .unwrap(); @@ -2725,52 +2728,57 @@ impl ApiTester { sk.sign(message).into() }; - let (fork_version_response_bytes, is_blinded_payload) = self + let (response, metadata) = self .client .get_validator_blocks_v3_ssz::(slot, &randao_reveal, None) .await .unwrap(); - if is_blinded_payload { - let blinded_block = >::from_ssz_bytes( - &fork_version_response_bytes.unwrap(), - &self.chain.spec, - ) - .expect("block contents bytes can be decoded"); + match response { + ProduceBlockV3Response::Blinded(blinded_block) => { + assert!(metadata.execution_payload_blinded); + assert_eq!( + metadata.consensus_version, + blinded_block.to_ref().fork_name(&self.chain.spec).unwrap() + ); + let signed_blinded_block = + blinded_block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); - let signed_blinded_block = - blinded_block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + self.client + .post_beacon_blinded_blocks_ssz(&signed_blinded_block) + .await + .unwrap(); - self.client - .post_beacon_blinded_blocks_ssz(&signed_blinded_block) - .await - .unwrap(); + let head_block = self.chain.head_beacon_block().clone_as_blinded(); + assert_eq!(head_block, signed_blinded_block); - let head_block = self.chain.head_beacon_block().clone_as_blinded(); - assert_eq!(head_block, signed_blinded_block); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + ProduceBlockV3Response::Full(block_contents) => { + assert!(!metadata.execution_payload_blinded); + assert_eq!( + metadata.consensus_version, + block_contents + .block() + .to_ref() + .fork_name(&self.chain.spec) + .unwrap() + ); + let signed_block_contents = + block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); - 
self.chain.slot_clock.set_slot(slot.as_u64() + 1); - } else { - let block_contents = >::from_ssz_bytes( - &fork_version_response_bytes.unwrap(), - &self.chain.spec, - ) - .expect("block contents bytes can be decoded"); + self.client + .post_beacon_blocks_ssz(&signed_block_contents) + .await + .unwrap(); - let signed_block_contents = - block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + assert_eq!( + self.chain.head_beacon_block().as_ref(), + signed_block_contents.signed_block() + ); - self.client - .post_beacon_blocks_ssz(&signed_block_contents) - .await - .unwrap(); - - assert_eq!( - self.chain.head_beacon_block().as_ref(), - signed_block_contents.signed_block() - ); - - self.chain.slot_clock.set_slot(slot.as_u64() + 1); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } } } @@ -3543,15 +3551,17 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), - Full(_) => panic!("Expecting a blinded payload"), + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); @@ -3645,15 +3655,17 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), - Full(_) => 
panic!("Expecting a blinded payload"), + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); @@ -3719,15 +3731,17 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), - Full(_) => panic!("Expecting a blinded payload"), + let payload: BlindedPayload = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => { + payload.body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; assert_eq!(payload.fee_recipient(), test_fee_recipient); @@ -3807,21 +3821,17 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a blinded payload"), + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a blinded payload"), }; assert_eq!(payload.parent_hash(), expected_parent_hash); @@ -3897,21 +3907,17 @@ impl ApiTester { .unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let 
(payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a full payload"), + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; assert_eq!(payload.prev_randao(), expected_prev_randao); @@ -3987,21 +3993,17 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a full payload"), + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; assert_eq!(payload.block_number(), expected_block_number); @@ -4075,21 +4077,17 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a blinded payload"), + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + 
ProduceBlockV3Response::Blinded(_) => panic!("Expecting a blinded payload"), }; assert!(payload.timestamp() > min_expected_timestamp); @@ -4135,15 +4133,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4201,15 +4199,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4309,15 +4307,15 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Blinded(_) => (), - Full(_) => panic!("Expecting a blinded payload"), + match payload_type.data { + ProduceBlockV3Response::Blinded(_) => (), + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; // Without proposing, advance into the next slot, this should make us cross the threshold @@ -4329,15 +4327,15 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - 
Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4457,15 +4455,15 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this @@ -4487,15 +4485,15 @@ impl ApiTester { .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) .await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(next_slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Blinded(_) => (), - Full(_) => panic!("Expecting a blinded payload"), + match payload_type.data { + ProduceBlockV3Response::Blinded(_) => (), + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; self @@ -4567,21 +4565,17 @@ impl ApiTester { let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - let payload: FullPayload = match payload_type { - Full(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), - Blinded(_) => panic!("Expecting a full payload"), + let payload: FullPayload = match payload_type.data { + ProduceBlockV3Response::Full(payload) => { + payload.block().body().execution_payload().unwrap().into() + } + ProduceBlockV3Response::Blinded(_) => 
panic!("Expecting a full payload"), }; let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); @@ -4640,15 +4634,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4704,15 +4698,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Blinded(_) => (), - Full(_) => panic!("Expecting a blinded payload"), + match payload_type.data { + ProduceBlockV3Response::Blinded(_) => (), + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; self @@ -4768,15 +4762,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4832,15 +4826,15 @@ impl ApiTester { let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), 
+ ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self @@ -4894,15 +4888,15 @@ impl ApiTester { let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - let _block_contents = match payload_type { - Blinded(payload) => payload.data, - Full(_) => panic!("Expecting a blinded payload"), + let _block_contents = match payload_type.data { + ProduceBlockV3Response::Blinded(payload) => payload, + ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; self @@ -4966,15 +4960,15 @@ impl ApiTester { let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let payload_type = self + let (payload_type, _) = self .client .get_validator_blocks_v3::(slot, &randao_reveal, None) .await .unwrap(); - match payload_type { - Full(_) => (), - Blinded(_) => panic!("Expecting a full payload"), + match payload_type.data { + ProduceBlockV3Response::Full(_) => (), + ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; self diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 0eb3f7bc80..3c0b4c7235 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -158,6 +158,10 @@ pub struct Config { /// Configuration for the inbound rate limiter (requests received by this node). pub inbound_rate_limiter_config: Option, + + /// Whether to disable logging duplicate gossip messages as WARN. If set to true, duplicate + /// errors will be logged at DEBUG level. 
+ pub disable_duplicate_warn_logs: bool, } impl Config { @@ -375,6 +379,7 @@ impl Default for Config { outbound_rate_limiter_config: None, invalid_block_storage: None, inbound_rate_limiter_config: None, + disable_duplicate_warn_logs: false, } } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 0894dc65bd..fbe686aa2a 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -35,6 +35,7 @@ pub use libp2p::{ use lru::LruCache; use slog::{crit, debug, error, info, trace, warn}; use ssz::Encode; +use std::num::NonZeroUsize; use std::{ collections::{HashMap, VecDeque}, net::{IpAddr, SocketAddr}, @@ -49,6 +50,7 @@ use types::{EnrForkId, EthSpec}; mod subnet_predicate; pub use subnet_predicate::subnet_predicate; +use types::non_zero_usize::new_non_zero_usize; /// Local ENR storage filename. pub const ENR_FILENAME: &str = "enr.dat"; @@ -70,6 +72,8 @@ const MAX_SUBNETS_IN_QUERY: usize = 3; pub const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16; /// The threshold for updating `min_ttl` on a connected peer. const DURATION_DIFFERENCE: Duration = Duration::from_millis(1); +/// The capacity of the Discovery ENR cache. +const ENR_CACHE_CAPACITY: NonZeroUsize = new_non_zero_usize(50); /// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl` /// of the peer if it is specified. 
@@ -318,7 +322,7 @@ impl Discovery { }; Ok(Self { - cached_enrs: LruCache::new(50), + cached_enrs: LruCache::new(ENR_CACHE_CAPACITY), network_globals, find_peer_active: false, queued_queries: VecDeque::with_capacity(10), diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 3c2a3f5a95..d15bde37d9 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -128,6 +128,8 @@ pub struct Network { gossip_cache: GossipCache, /// This node's PeerId. pub local_peer_id: PeerId, + /// Flag to disable warning logs for duplicate gossip messages and log at DEBUG level instead. + pub disable_duplicate_warn_logs: bool, /// Logger for behaviour actions. log: slog::Logger, } @@ -425,6 +427,7 @@ impl Network { update_gossipsub_scores, gossip_cache, local_peer_id, + disable_duplicate_warn_logs: config.disable_duplicate_warn_logs, log, }; @@ -743,7 +746,21 @@ impl Network { .gossipsub_mut() .publish(Topic::from(topic.clone()), message_data.clone()) { - slog::warn!(self.log, "Could not publish message"; "error" => ?e); + if self.disable_duplicate_warn_logs && matches!(e, PublishError::Duplicate) { + debug!( + self.log, + "Could not publish message"; + "error" => ?e, + "kind" => %topic.kind(), + ); + } else { + warn!( + self.log, + "Could not publish message"; + "error" => ?e, + "kind" => %topic.kind(), + ); + }; // add to metrics match topic.kind() { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 214accd3fd..0fc485b152 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1288,5 +1288,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("64") .takes_value(true) ) + .arg( + Arg::with_name("disable-duplicate-warn-logs") + .long("disable-duplicate-warn-logs") + .help("Disable warning logs for duplicate gossip messages. The WARN level log is \ + useful for detecting a duplicate validator key running elsewhere. 
However, this may \ + result in excessive warning logs if the validator is broadcasting messages to \ + multiple beacon nodes via the validator client --broadcast flag. In this case, \ + disabling these warn logs may be useful.") + .takes_value(false) + ) .group(ArgGroup::with_name("enable_http").args(&["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 9ceab47f33..7c48ae7609 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1395,6 +1395,9 @@ pub fn set_network_config( Some(config_str.parse()?) } }; + + config.disable_duplicate_warn_logs = cli_args.is_present("disable-duplicate-warn-logs"); + Ok(()) } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 4e516b091b..681d424e28 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -2,12 +2,14 @@ use crate::{DBColumn, Error, StoreItem}; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use std::num::NonZeroUsize; +use types::non_zero_usize::new_non_zero_usize; use types::{EthSpec, MinimalEthSpec}; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; -pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; -pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1; +pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(5); +pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1); pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1; pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; @@ -19,9 +21,9 @@ pub struct StoreConfig { /// Flag indicating whether the `slots_per_restore_point` was set explicitly by the user. pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. 
- pub block_cache_size: usize, + pub block_cache_size: NonZeroUsize, /// Maximum number of states from freezer database to store in the in-memory state cache. - pub historic_state_cache_size: usize, + pub historic_state_cache_size: NonZeroUsize, /// Whether to compact the database on initialization. pub compact_on_init: bool, /// Whether to compact the database during database pruning. diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 208dcfdb04..f73c19401d 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -35,6 +35,7 @@ use state_processing::{ use std::cmp::min; use std::convert::TryInto; use std::marker::PhantomData; +use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; @@ -85,7 +86,7 @@ struct BlockCache { } impl BlockCache { - pub fn new(size: usize) -> Self { + pub fn new(size: NonZeroUsize) -> Self { Self { block_cache: LruCache::new(size), blob_cache: LruCache::new(size), diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 2a08f90174..5ab7c3849b 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -29,6 +29,11 @@ FLAGS: --disable-deposit-contract-sync Explictly disables syncing of deposit logs from the execution node. This overrides any previous option that depends on it. Useful if you intend to run a non-validating beacon node. + --disable-duplicate-warn-logs Disable warning logs for duplicate gossip messages. The WARN level log is + useful for detecting a duplicate validator key running elsewhere. + However, this may result in excessive warning logs if the validator is + broadcasting messages to multiple beacon nodes via the validator client + --broadcast flag. In this case, disabling these warn logs may be useful. -x, --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. 
This disables this feature, fixing the ENR's IP/PORT to those specified on boot. diff --git a/book/src/redundancy.md b/book/src/redundancy.md index 93529295ae..8318aea21e 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -101,6 +101,10 @@ from this list: - `none`: Disable all broadcasting. This option only has an effect when provided alone, otherwise it is ignored. Not recommended except for expert tweakers. +Broadcasting attestation, blocks and sync committee messages may result in excessive warning logs in the beacon node +due to duplicate gossip messages. In this case, it may be desirable to disable warning logs for duplicates using the +beacon node `--disable-duplicate-warn-logs` flag. + The default is `--broadcast subscriptions`. To also broadcast blocks for example, use `--broadcast subscriptions,blocks`. diff --git a/bors.toml b/bors.toml deleted file mode 100644 index e821b89a81..0000000000 --- a/bors.toml +++ /dev/null @@ -1,24 +0,0 @@ -status = [ - "release-tests-ubuntu", - "release-tests-windows", - "debug-tests-ubuntu", - "state-transition-vectors-ubuntu", - "ef-tests-ubuntu", - "dockerfile-ubuntu", - "eth1-simulator-ubuntu", - "merge-transition-ubuntu", - "no-eth1-simulator-ubuntu", - "check-code", - "cargo-udeps", - "beacon-chain-tests", - "op-pool-tests", - "doppelganger-protection-test", - "execution-engine-integration-ubuntu", - "check-msrv", - "slasher-tests", - "syncing-simulator-ubuntu", - "compile-with-beta-compiler", -] -use_squash_merge = true -timeout_sec = 10800 -pr_status = ["license/cla", "target-branch-check"] diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 8dc0888e6e..61c65a29ba 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -367,7 +367,8 @@ impl ValidatorDefinitions { pub fn save>(&self, validators_dir: P) -> Result<(), Error> { let config_path = 
validators_dir.as_ref().join(CONFIG_FILENAME); let temp_path = validators_dir.as_ref().join(CONFIG_TEMP_FILENAME); - let bytes = serde_yaml::to_vec(self).map_err(Error::UnableToEncodeFile)?; + let mut bytes = vec![]; + serde_yaml::to_writer(&mut bytes, self).map_err(Error::UnableToEncodeFile)?; write_file_via_temporary(&config_path, &temp_path, &bytes) .map_err(Error::UnableToWriteFile)?; diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 80eafa1209..f568c422bb 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -32,6 +32,7 @@ use serde::{de::DeserializeOwned, Serialize}; use ssz::Encode; use std::convert::TryFrom; use std::fmt; +use std::future::Future; use std::iter::Iterator; use std::path::PathBuf; use std::time::Duration; @@ -68,6 +69,8 @@ pub enum Error { InvalidJson(serde_json::Error), /// The server returned an invalid server-sent event. InvalidServerSentEvent(String), + /// The server sent invalid response headers. + InvalidHeaders(String), /// The server returned an invalid SSZ response. InvalidSsz(ssz::DecodeError), /// An I/O error occurred while loading an API token from disk. @@ -98,6 +101,7 @@ impl Error { Error::MissingSignatureHeader => None, Error::InvalidJson(_) => None, Error::InvalidServerSentEvent(_) => None, + Error::InvalidHeaders(_) => None, Error::InvalidSsz(_) => None, Error::TokenReadError(..) 
=> None, Error::NoServerPubkey | Error::NoToken => None, @@ -125,7 +129,7 @@ pub struct Timeouts { pub get_beacon_blocks_ssz: Duration, pub get_debug_beacon_states: Duration, pub get_deposit_snapshot: Duration, - pub get_validator_block_ssz: Duration, + pub get_validator_block: Duration, } impl Timeouts { @@ -141,7 +145,7 @@ impl Timeouts { get_beacon_blocks_ssz: timeout, get_debug_beacon_states: timeout, get_deposit_snapshot: timeout, - get_validator_block_ssz: timeout, + get_validator_block: timeout, } } } @@ -278,27 +282,28 @@ impl BeaconNodeHttpClient { } /// Perform a HTTP GET request using an 'accept' header, returning `None` on a 404 error. - pub async fn get_bytes_response_with_response_headers( + pub async fn get_response_with_response_headers( &self, url: U, accept_header: Accept, timeout: Duration, - ) -> Result<(Option>, Option), Error> { + parser: impl FnOnce(Response, HeaderMap) -> F, + ) -> Result, Error> + where + F: Future>, + { let opt_response = self .get_response(url, |b| b.accept(accept_header).timeout(timeout)) .await .optional()?; - // let headers = opt_response.headers(); match opt_response { Some(resp) => { let response_headers = resp.headers().clone(); - Ok(( - Some(resp.bytes().await?.into_iter().collect::>()), - Some(response_headers), - )) + let parsed_response = parser(resp, response_headers).await?; + Ok(Some(parsed_response)) } - None => Ok((None, None)), + None => Ok(None), } } @@ -1821,7 +1826,7 @@ impl BeaconNodeHttpClient { } /// returns `GET v3/validator/blocks/{slot}` URL path - pub async fn get_validator_blocks_v3_path( + pub async fn get_validator_blocks_v3_path( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1858,7 +1863,7 @@ impl BeaconNodeHttpClient { slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result, Error> { + ) -> Result<(JsonProduceBlockV3Response, ProduceBlockV3Metadata), Error> { self.get_validator_blocks_v3_modular( slot, randao_reveal, @@ -1875,35 +1880,41 @@ impl 
BeaconNodeHttpClient { randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result, Error> { + ) -> Result<(JsonProduceBlockV3Response, ProduceBlockV3Metadata), Error> { let path = self - .get_validator_blocks_v3_path::( - slot, - randao_reveal, - graffiti, - skip_randao_verification, + .get_validator_blocks_v3_path(slot, randao_reveal, graffiti, skip_randao_verification) + .await?; + + let opt_result = self + .get_response_with_response_headers( + path, + Accept::Json, + self.timeouts.get_validator_block, + |response, headers| async move { + let header_metadata = ProduceBlockV3Metadata::try_from(&headers) + .map_err(Error::InvalidHeaders)?; + if header_metadata.execution_payload_blinded { + let blinded_response = response + .json::, + ProduceBlockV3Metadata>>() + .await? + .map_data(ProduceBlockV3Response::Blinded); + Ok((blinded_response, header_metadata)) + } else { + let full_block_response= response + .json::, + ProduceBlockV3Metadata>>() + .await? + .map_data(ProduceBlockV3Response::Full); + Ok((full_block_response, header_metadata)) + } + }, ) .await?; - let response = self.get_response(path, |b| b).await?; - - let is_blinded_payload = response - .headers() - .get(EXECUTION_PAYLOAD_BLINDED_HEADER) - .map(|value| value.to_str().unwrap_or_default().to_lowercase() == "true") - .unwrap_or(false); - - if is_blinded_payload { - let blinded_payload = response - .json::>>() - .await?; - Ok(ForkVersionedBeaconBlockType::Blinded(blinded_payload)) - } else { - let full_payload = response - .json::>>() - .await?; - Ok(ForkVersionedBeaconBlockType::Full(full_payload)) - } + // Generic handler is optional but this route should never 404 unless unimplemented, so + // treat that as an error. 
+ opt_result.ok_or(Error::StatusCode(StatusCode::NOT_FOUND)) } /// `GET v3/validator/blocks/{slot}` in ssz format @@ -1912,7 +1923,7 @@ impl BeaconNodeHttpClient { slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result<(Option>, bool), Error> { + ) -> Result<(ProduceBlockV3Response, ProduceBlockV3Metadata), Error> { self.get_validator_blocks_v3_modular_ssz::( slot, randao_reveal, @@ -1929,33 +1940,48 @@ impl BeaconNodeHttpClient { randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result<(Option>, bool), Error> { + ) -> Result<(ProduceBlockV3Response, ProduceBlockV3Metadata), Error> { let path = self - .get_validator_blocks_v3_path::( - slot, - randao_reveal, - graffiti, - skip_randao_verification, - ) + .get_validator_blocks_v3_path(slot, randao_reveal, graffiti, skip_randao_verification) .await?; - let (response_content, response_headers) = self - .get_bytes_response_with_response_headers( + let opt_response = self + .get_response_with_response_headers( path, Accept::Ssz, - self.timeouts.get_validator_block_ssz, + self.timeouts.get_validator_block, + |response, headers| async move { + let metadata = ProduceBlockV3Metadata::try_from(&headers) + .map_err(Error::InvalidHeaders)?; + let response_bytes = response.bytes().await?; + + // Parse bytes based on metadata. 
+ let response = if metadata.execution_payload_blinded { + ProduceBlockV3Response::Blinded( + BlindedBeaconBlock::from_ssz_bytes_for_fork( + &response_bytes, + metadata.consensus_version, + ) + .map_err(Error::InvalidSsz)?, + ) + } else { + ProduceBlockV3Response::Full( + FullBlockContents::from_ssz_bytes_for_fork( + &response_bytes, + metadata.consensus_version, + ) + .map_err(Error::InvalidSsz)?, + ) + }; + + Ok((response, metadata)) + }, ) .await?; - let is_blinded_payload = match response_headers { - Some(headers) => headers - .get(EXECUTION_PAYLOAD_BLINDED_HEADER) - .map(|value| value.to_str().unwrap_or_default().to_lowercase() == "true") - .unwrap_or(false), - None => false, - }; - - Ok((response_content, is_blinded_payload)) + // Generic handler is optional but this route should never 404 unless unimplemented, so + // treat that as an error. + opt_response.ok_or(Error::StatusCode(StatusCode::NOT_FOUND)) } /// `GET v2/validator/blocks/{slot}` in ssz format @@ -1986,7 +2012,7 @@ impl BeaconNodeHttpClient { .get_validator_blocks_path::(slot, randao_reveal, graffiti, skip_randao_verification) .await?; - self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block) .await } @@ -2090,7 +2116,7 @@ impl BeaconNodeHttpClient { ) .await?; - self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) + self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block) .await } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index d8086784b1..259aa2cfba 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1,9 +1,13 @@ //! This module exposes a superset of the `types` crate. It adds additional types that are only //! required for the HTTP API. 
-use crate::Error as ServerError; +use crate::{ + Error as ServerError, CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, + EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, +}; use lighthouse_network::{ConnectionDirection, Enr, Multiaddr, PeerConnectionStatus}; use mediatype::{names, MediaType, MediaTypeList}; +use reqwest::header::HeaderMap; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use ssz::{Decode, DecodeError}; @@ -1430,11 +1434,6 @@ pub mod serde_status_code { } } -pub enum ForkVersionedBeaconBlockType { - Full(ForkVersionedResponse>), - Blinded(ForkVersionedResponse>), -} - #[cfg(test)] mod tests { use super::*; @@ -1521,6 +1520,9 @@ pub enum ProduceBlockV3Response { Blinded(BlindedBeaconBlock), } +pub type JsonProduceBlockV3Response = + ForkVersionedResponse, ProduceBlockV3Metadata>; + /// A wrapper over a [`BeaconBlock`] or a [`BlockContents`]. #[derive(Debug, Encode, Serialize, Deserialize)] #[serde(untagged)] @@ -1535,6 +1537,27 @@ pub enum FullBlockContents { pub type BlockContentsTuple = (BeaconBlock, Option<(KzgProofs, BlobsList)>); +// This value should never be used +fn dummy_consensus_version() -> ForkName { + ForkName::Base +} + +/// Metadata about a `ProduceBlockV3Response` which is returned in the body & headers. +#[derive(Debug, Deserialize, Serialize)] +pub struct ProduceBlockV3Metadata { + // The consensus version is serialized & deserialized by `ForkVersionedResponse`. 
+ #[serde( + skip_serializing, + skip_deserializing, + default = "dummy_consensus_version" + )] + pub consensus_version: ForkName, + pub execution_payload_blinded: bool, + pub execution_payload_value: Uint256, + #[serde(with = "serde_utils::quoted_u64")] + pub consensus_block_value: u64, +} + impl FullBlockContents { pub fn new(block: BeaconBlock, blob_data: Option<(KzgProofs, BlobsList)>) -> Self { match blob_data { @@ -1556,13 +1579,19 @@ impl FullBlockContents { len: bytes.len(), expected: slot_len, })?; - let slot = Slot::from_ssz_bytes(slot_bytes)?; let fork_at_slot = spec.fork_name_at_slot::(slot); + Self::from_ssz_bytes_for_fork(bytes, fork_at_slot) + } - match fork_at_slot { + /// SSZ decode with fork variant passed in explicitly. + pub fn from_ssz_bytes_for_fork( + bytes: &[u8], + fork_name: ForkName, + ) -> Result { + match fork_name { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BeaconBlock::from_ssz_bytes(bytes, spec) + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) .map(|block| FullBlockContents::Block(block)) } ForkName::Deneb => { @@ -1573,8 +1602,9 @@ impl FullBlockContents { builder.register_type::>()?; let mut decoder = builder.build()?; - let block = - decoder.decode_next_with(|bytes| BeaconBlock::from_ssz_bytes(bytes, spec))?; + let block = decoder.decode_next_with(|bytes| { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + })?; let kzg_proofs = decoder.decode_next()?; let blobs = decoder.decode_next()?; @@ -1643,6 +1673,52 @@ impl Into> for FullBlockContents { pub type SignedBlockContentsTuple = (SignedBeaconBlock, Option<(KzgProofs, BlobsList)>); +fn parse_required_header( + headers: &HeaderMap, + header_name: &str, + parse: impl FnOnce(&str) -> Result, +) -> Result { + let str_value = headers + .get(header_name) + .ok_or_else(|| format!("missing required header {header_name}"))? 
+ .to_str() + .map_err(|e| format!("invalid value in {header_name}: {e}"))?; + parse(str_value) +} + +impl TryFrom<&HeaderMap> for ProduceBlockV3Metadata { + type Error = String; + + fn try_from(headers: &HeaderMap) -> Result { + let consensus_version = parse_required_header(headers, CONSENSUS_VERSION_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {CONSENSUS_VERSION_HEADER}: {e:?}")) + })?; + let execution_payload_blinded = + parse_required_header(headers, EXECUTION_PAYLOAD_BLINDED_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {EXECUTION_PAYLOAD_BLINDED_HEADER}: {e:?}")) + })?; + let execution_payload_value = + parse_required_header(headers, EXECUTION_PAYLOAD_VALUE_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {EXECUTION_PAYLOAD_VALUE_HEADER}: {e:?}")) + })?; + let consensus_block_value = + parse_required_header(headers, CONSENSUS_BLOCK_VALUE_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {CONSENSUS_BLOCK_VALUE_HEADER}: {e:?}")) + })?; + + Ok(ProduceBlockV3Metadata { + consensus_version, + execution_payload_blinded, + execution_payload_value, + consensus_block_value, + }) + } +} + /// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBlockContents`]. #[derive(Clone, Debug, Encode, Serialize, Deserialize)] #[serde(untagged)] diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 7215c779e7..90dff84b39 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -112,12 +112,15 @@ impl> BeaconBlock { let slot = Slot::from_ssz_bytes(slot_bytes)?; let fork_at_slot = spec.fork_name_at_slot::(slot); + Self::from_ssz_bytes_for_fork(bytes, fork_at_slot) + } - Ok(map_fork_name!( - fork_at_slot, - Self, - <_>::from_ssz_bytes(bytes)? - )) + /// Custom SSZ decoder that takes a `ForkName` as context. 
+ pub fn from_ssz_bytes_for_fork( + bytes: &[u8], + fork_name: ForkName, + ) -> Result { + Ok(map_fork_name!(fork_name, Self, <_>::from_ssz_bytes(bytes)?)) } /// Try decoding each beacon block variant in sequence. diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index 2d97dc1219..195c083e29 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ b/consensus/types/src/fork_versioned_response.rs @@ -4,47 +4,6 @@ use serde::{Deserialize, Deserializer, Serialize}; use serde_json::value::Value; use std::sync::Arc; -// Deserialize is only implemented for types that implement ForkVersionDeserialize -#[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ExecutionOptimisticFinalizedForkVersionedResponse { - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, - pub execution_optimistic: Option, - pub finalized: Option, - pub data: T, -} - -impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticFinalizedForkVersionedResponse -where - F: ForkVersionDeserialize, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - struct Helper { - version: Option, - execution_optimistic: Option, - finalized: Option, - data: serde_json::Value, - } - - let helper = Helper::deserialize(deserializer)?; - let data = match helper.version { - Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, - None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, - }; - - Ok(ExecutionOptimisticFinalizedForkVersionedResponse { - version: helper.version, - execution_optimistic: helper.execution_optimistic, - finalized: helper.finalized, - data, - }) - } -} - pub trait ForkVersionDeserialize: Sized + DeserializeOwned { fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: Value, @@ -52,17 +11,41 @@ pub trait ForkVersionDeserialize: Sized + DeserializeOwned { ) -> Result; } -// Deserialize is only implemented 
for types that implement ForkVersionDeserialize +/// Deserialize is only implemented for types that implement ForkVersionDeserialize. +/// +/// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than +/// version. If you *do* care about adding other fields you can mix in any type that implements +/// `Deserialize`. #[derive(Debug, PartialEq, Clone, Serialize)] -pub struct ForkVersionedResponse { +pub struct ForkVersionedResponse { #[serde(skip_serializing_if = "Option::is_none")] pub version: Option, + #[serde(flatten)] + pub metadata: M, pub data: T, } -impl<'de, F> serde::Deserialize<'de> for ForkVersionedResponse +/// Metadata type similar to unit (i.e. `()`) but deserializes from a map (`serde_json::Value`). +/// +/// Unfortunately the braces are semantically significant, i.e. `struct EmptyMetadata;` does not +/// work. +#[derive(Debug, PartialEq, Clone, Default, Deserialize, Serialize)] +pub struct EmptyMetadata {} + +/// Fork versioned response with extra information about finalization & optimistic execution. 
+pub type ExecutionOptimisticFinalizedForkVersionedResponse = + ForkVersionedResponse; + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct ExecutionOptimisticFinalizedMetadata { + pub execution_optimistic: Option, + pub finalized: Option, +} + +impl<'de, F, M> serde::Deserialize<'de> for ForkVersionedResponse where F: ForkVersionDeserialize, + M: DeserializeOwned, { fn deserialize(deserializer: D) -> Result where @@ -71,6 +54,8 @@ where #[derive(Deserialize)] struct Helper { version: Option, + #[serde(flatten)] + metadata: serde_json::Value, data: serde_json::Value, } @@ -79,9 +64,11 @@ where Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?, None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?, }; + let metadata = serde_json::from_value(helper.metadata).map_err(serde::de::Error::custom)?; Ok(ForkVersionedResponse { version: helper.version, + metadata, data, }) } @@ -98,6 +85,22 @@ impl ForkVersionDeserialize for Arc { } } +impl ForkVersionedResponse { + /// Apply a function to the inner `data`, potentially changing its type. 
+ pub fn map_data(self, f: impl FnOnce(T) -> U) -> ForkVersionedResponse { + let ForkVersionedResponse { + version, + metadata, + data, + } = self; + ForkVersionedResponse { + version, + metadata, + data: f(data), + } + } +} + #[cfg(test)] mod fork_version_response_tests { use crate::{ @@ -112,6 +115,7 @@ mod fork_version_response_tests { let response_json = serde_json::to_string(&json!(ForkVersionedResponse::> { version: Some(ForkName::Merge), + metadata: Default::default(), data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), })) .unwrap(); @@ -129,6 +133,7 @@ mod fork_version_response_tests { let response_json = serde_json::to_string(&json!(ForkVersionedResponse::> { version: Some(ForkName::Capella), + metadata: Default::default(), data: ExecutionPayload::Merge(ExecutionPayloadMerge::default()), })) .unwrap(); diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 2322a67a62..52be2fff46 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -100,6 +100,7 @@ pub mod sqlite; pub mod blob_sidecar; pub mod light_client_header; +pub mod non_zero_usize; use ethereum_types::{H160, H256}; diff --git a/consensus/types/src/non_zero_usize.rs b/consensus/types/src/non_zero_usize.rs new file mode 100644 index 0000000000..d61000c9a6 --- /dev/null +++ b/consensus/types/src/non_zero_usize.rs @@ -0,0 +1,8 @@ +use std::num::NonZeroUsize; + +pub const fn new_non_zero_usize(x: usize) -> NonZeroUsize { + match NonZeroUsize::new(x) { + Some(n) => n, + None => panic!("Expected a non zero usize."), + } +} diff --git a/lcli/Dockerfile b/lcli/Dockerfile index 1ee80e14fd..2ff4706a91 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.69.0-bullseye AS builder 
+FROM rust:1.73.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG PORTABLE diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index fd74b1b5b9..621424c305 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -18,6 +18,7 @@ use std::str::FromStr; use std::string::ToString; use std::time::Duration; use tempfile::TempDir; +use types::non_zero_usize::new_non_zero_usize; use types::{ Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, ProgressiveBalancesMode, @@ -1769,14 +1770,19 @@ fn block_cache_size_flag() { CommandLineTest::new() .flag("block-cache-size", Some("4")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.block_cache_size, 4_usize)); + .with_config(|config| assert_eq!(config.store.block_cache_size, new_non_zero_usize(4))); } #[test] fn historic_state_cache_size_flag() { CommandLineTest::new() .flag("historic-state-cache-size", Some("4")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.store.historic_state_cache_size, 4_usize)); + .with_config(|config| { + assert_eq!( + config.store.historic_state_cache_size, + new_non_zero_usize(4) + ) + }); } #[test] fn historic_state_cache_size_default() { @@ -2001,7 +2007,10 @@ fn slasher_attestation_cache_size_flag() { .slasher .as_ref() .expect("Unable to parse Slasher config"); - assert_eq!(slasher_config.attestation_root_cache_size, 10000); + assert_eq!( + slasher_config.attestation_root_cache_size, + new_non_zero_usize(10000) + ); }); } #[test] @@ -2558,3 +2567,22 @@ fn genesis_state_url_value() { assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42)); }); } + +#[test] +fn disable_duplicate_warn_logs_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.network.disable_duplicate_warn_logs, false); + }); +} + +#[test] +fn 
disable_duplicate_warn_logs() { + CommandLineTest::new() + .flag("disable-duplicate-warn-logs", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.network.disable_duplicate_warn_logs, true); + }); +} diff --git a/scripts/ci/check-success-job.sh b/scripts/ci/check-success-job.sh new file mode 100755 index 0000000000..dfa5c03257 --- /dev/null +++ b/scripts/ci/check-success-job.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Check that $SUCCESS_JOB depends on all other jobs in the given $YAML + +set -euf -o pipefail + +YAML=$1 +SUCCESS_JOB=$2 + +yq '... comments="" | .jobs | map(. | key) | .[]' < "$YAML" | grep -v "$SUCCESS_JOB" | sort > all_jobs.txt +yq "... comments=\"\" | .jobs.$SUCCESS_JOB.needs[]" < "$YAML" | grep -v "$SUCCESS_JOB" | sort > dep_jobs.txt +diff all_jobs.txt dep_jobs.txt || (echo "COMPLETENESS CHECK FAILED" && exit 1) +rm all_jobs.txt dep_jobs.txt +echo "COMPLETENESS CHECK PASSED" diff --git a/slasher/src/config.rs b/slasher/src/config.rs index 894760d277..4fd74343e7 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -1,7 +1,9 @@ use crate::Error; use serde::{Deserialize, Serialize}; +use std::num::NonZeroUsize; use std::path::PathBuf; use strum::{Display, EnumString, EnumVariantNames}; +use types::non_zero_usize::new_non_zero_usize; use types::{Epoch, EthSpec, IndexedAttestation}; pub const DEFAULT_CHUNK_SIZE: usize = 16; @@ -10,7 +12,7 @@ pub const DEFAULT_HISTORY_LENGTH: usize = 4096; pub const DEFAULT_UPDATE_PERIOD: u64 = 12; pub const DEFAULT_SLOT_OFFSET: f64 = 10.5; pub const DEFAULT_MAX_DB_SIZE: usize = 256 * 1024; // 256 GiB -pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: usize = 100_000; +pub const DEFAULT_ATTESTATION_ROOT_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(100_000); pub const DEFAULT_BROADCAST: bool = false; #[cfg(all(feature = "mdbx", not(feature = "lmdb")))] @@ -38,7 +40,7 @@ pub struct Config { /// Maximum size of the database in megabytes. 
pub max_db_size_mbs: usize, /// Maximum size of the in-memory cache for attestation roots. - pub attestation_root_cache_size: usize, + pub attestation_root_cache_size: NonZeroUsize, /// Whether to broadcast slashings found to the network. pub broadcast: bool, /// Database backend to use. diff --git a/testing/network_testing/README.md b/testing/network_testing/README.md new file mode 100644 index 0000000000..3591514682 --- /dev/null +++ b/testing/network_testing/README.md @@ -0,0 +1,74 @@ +# Lighthouse live network testing + + +## DISCLAIMER + +This document describes how to run a lighthouse node with minimal resources and time on a live +network. + +This procedure should ONLY be used for testing networks and never in production and never with +attached validators. The Lighthouse node described in this state is only a partially functioning +node. + + +## Overview + +We are going to run a single lighthouse node connected to a live network, without syncing and +without an execution engine. This should only ever be done for testing. + +There are two main components needed. + +1. A lighthouse node that doesn't sync +2. A fake execution client that does nothing + +We will start with the second. + +## Mock-EL + +This is a service that runs and fakes an execution engine. We firstly need to install the lighthouse +`lcli` tool. + +``` +$ make install-lcli +``` + +Once installed, run the fake execution client: + +``` +$ lcli mock-el --jwt-output-path /tmp/mockel.jwt +``` + +This will create a server listening on localhost:8551 + +## Lighthouse no sync + +To create a lighthouse node that doesn't sync we need to compile it with a special flag. 
+ +``` +$ cargo build --release --bin lighthouse --features network/disable-backfill +``` + +Once built, it can run via checkpoint sync on any network, making sure we point to our mock-el + +Prater testnet: + +``` +$ lighthouse --network prater bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url +https://prater.checkpoint.sigp.io --execution-endpoint http://localhost:8551 +``` + +Mainnet: + +``` +$ lighthouse --network mainnet bn --execution-jwt /tmp/mockel.jwt --checkpoint-sync-url +https://checkpoint.sigp.io --execution-endpoint http://localhost:8551 +``` + +Additional flags, such as metrics may be added. + + +## Additional Notes + +The above is assuming that you have not run the command in the past. If you have a database in +existence for the network you are testing, checkpoint sync will not start. You may need to add the +`--purge-db` flag to remove any past database and force checkpoint sync to run. diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 63c4c8c42c..1a9972b44d 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -83,7 +83,7 @@ const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4; const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; -const HTTP_GET_VALIDATOR_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger"; @@ -311,8 +311,7 @@ impl ProductionValidatorClient { / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT, get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT, get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT, - get_validator_block_ssz: slot_duration - / HTTP_GET_VALIDATOR_BLOCK_SSZ_TIMEOUT_QUOTIENT, + get_validator_block: slot_duration / HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT, } } else { Timeouts::set_all(slot_duration) } } diff --git 
a/watch/Cargo.toml b/watch/Cargo.toml index 67cbc3cc23..aaaf50aa40 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -21,7 +21,7 @@ types = { workspace = true } eth2 = { workspace = true } beacon_node = { workspace = true } tokio = { workspace = true } -axum = "0.6.18" +axum = "0.7" hyper = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } @@ -41,8 +41,7 @@ tokio-postgres = "0.7.5" http_api = { workspace = true } beacon_chain = { workspace = true } network = { workspace = true } -# TODO: update to 0.15 when released: https://github.com/testcontainers/testcontainers-rs/issues/497 -testcontainers = { git = "https://github.com/testcontainers/testcontainers-rs/", rev = "0f2c9851" } +testcontainers = "0.15" unused_port = { workspace = true } task_executor = { workspace = true } logging = { workspace = true } diff --git a/watch/src/cli.rs b/watch/src/cli.rs index a8e5f3716f..97dc217293 100644 --- a/watch/src/cli.rs +++ b/watch/src/cli.rs @@ -1,6 +1,5 @@ use crate::{config::Config, logger, server, updater}; use clap::{App, Arg}; -use tokio::sync::oneshot; pub const SERVE: &str = "serve"; pub const RUN_UPDATER: &str = "run-updater"; @@ -44,12 +43,9 @@ pub async fn run() -> Result<(), String> { (RUN_UPDATER, Some(_)) => updater::run_updater(config) .await .map_err(|e| format!("Failure: {:?}", e)), - (SERVE, Some(_)) => { - let (_shutdown_tx, shutdown_rx) = oneshot::channel(); - server::serve(config, shutdown_rx) - .await - .map_err(|e| format!("Failure: {:?}", e)) - } + (SERVE, Some(_)) => server::serve(config) + .await + .map_err(|e| format!("Failure: {:?}", e)), _ => Err("Unsupported subcommand. 
See --help".into()), } } diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs index d1542f7841..0db3df2a0d 100644 --- a/watch/src/server/error.rs +++ b/watch/src/server/error.rs @@ -3,12 +3,14 @@ use axum::Error as AxumError; use axum::{http::StatusCode, response::IntoResponse, Json}; use hyper::Error as HyperError; use serde_json::json; +use std::io::Error as IoError; #[derive(Debug)] pub enum Error { Axum(AxumError), Hyper(HyperError), Database(DbError), + IoError(IoError), BadRequest, NotFound, Other(String), @@ -43,6 +45,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: IoError) -> Self { + Error::IoError(e) + } +} + impl From for Error { fn from(e: String) -> Self { Error::Other(e) diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs index d8ae0eb6c6..25dd242aab 100644 --- a/watch/src/server/mod.rs +++ b/watch/src/server/mod.rs @@ -11,9 +11,8 @@ use axum::{ }; use eth2::types::ErrorMessage; use log::info; -use std::future::Future; -use std::net::SocketAddr; -use tokio::sync::oneshot; +use std::future::{Future, IntoFuture}; +use std::net::{SocketAddr, TcpListener}; pub use config::Config; pub use error::Error; @@ -22,7 +21,7 @@ mod config; mod error; mod handler; -pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Result<(), Error> { +pub async fn serve(config: FullConfig) -> Result<(), Error> { let db = database::build_connection_pool(&config.database)?; let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)? 
.ok_or_else(|| { @@ -32,9 +31,7 @@ pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Resul ) })?; - let server = start_server(&config, slots_per_epoch as u64, db, async { - let _ = shutdown.await; - })?; + let server = start_server(&config, slots_per_epoch as u64, db)?; server.await?; @@ -61,8 +58,7 @@ pub fn start_server( config: &FullConfig, slots_per_epoch: u64, pool: PgPool, - shutdown: impl Future + Send + Sync + 'static, -) -> Result> + 'static, Error> { +) -> Result> + 'static, Error> { let mut routes = Router::new() .route("/v1/slots", get(handler::get_slots_by_range)) .route("/v1/slots/:slot", get(handler::get_slot)) @@ -108,16 +104,13 @@ pub fn start_server( .layer(Extension(slots_per_epoch)); let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); - - let server = axum::Server::try_bind(&addr)?.serve(app.into_make_service()); - - let server = server.with_graceful_shutdown(async { - shutdown.await; - }); + let listener = TcpListener::bind(addr)?; + listener.set_nonblocking(true)?; + let serve = axum::serve(tokio::net::TcpListener::from_std(listener)?, app); info!("HTTP server listening on {}", addr); - Ok(server) + Ok(serve.into_future()) } // The default route indicating that no available routes matched the request. diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs index dc0b8af6e3..0e29e7f0cd 100644 --- a/watch/tests/tests.rs +++ b/watch/tests/tests.rs @@ -17,7 +17,6 @@ use std::env; use std::net::SocketAddr; use std::time::Duration; use testcontainers::{clients::Cli, core::WaitFor, Image, RunnableImage}; -use tokio::sync::oneshot; use tokio::{runtime, task::JoinHandle}; use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls}; use types::{Hash256, MainnetEthSpec, Slot}; @@ -188,11 +187,7 @@ impl TesterBuilder { /* * Spawn a Watch HTTP API. 
*/ - let (_watch_shutdown_tx, watch_shutdown_rx) = oneshot::channel(); - let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool, async { - let _ = watch_shutdown_rx.await; - }) - .unwrap(); + let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool).unwrap(); tokio::spawn(watch_server); let addr = SocketAddr::new( @@ -228,7 +223,6 @@ impl TesterBuilder { config: self.config, updater, _bn_network_rx: self._bn_network_rx, - _watch_shutdown_tx, } } async fn initialize_database(&self) -> PgPool { @@ -245,7 +239,6 @@ struct Tester { pub config: Config, pub updater: UpdateHandler, _bn_network_rx: NetworkReceivers, - _watch_shutdown_tx: oneshot::Sender<()>, } impl Tester {