diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 7cda3e477d..a80470cf16 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -420,7 +420,7 @@ jobs: channel: stable cache-target: release - name: Run Makefile to trigger the bash script - run: make cli + run: make cli-local # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. test-suite-success: diff --git a/Cargo.lock b/Cargo.lock index ecbfd0cb8d..0d9da0c7fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -59,19 +59,13 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -161,9 +155,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-consensus" -version = "0.3.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4177d135789e282e925092be8939d421b701c6d92c0a16679faa659d9166289d" +checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -193,9 +187,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.3.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "499ee14d296a133d142efd215eb36bf96124829fe91cf8f5d4e5ccdd381eae00" +checksum = 
"f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -210,9 +204,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a767e59c86900dd7c3ce3ecef04f3ace5ac9631ee150beb8b7d22f7fa3bbb2d7" +checksum = "411aff151f2a73124ee473708e82ed51b2535f68928b6a1caa8bc1246ae6f7cd" dependencies = [ "alloy-rlp", "arbitrary", @@ -220,11 +214,11 @@ dependencies = [ "cfg-if", "const-hex", "derive_arbitrary", - "derive_more 0.99.18", + "derive_more 1.0.0", "getrandom", "hex-literal", "itoa", - "k256 0.13.3", + "k256 0.13.4", "keccak-asm", "proptest", "proptest-derive", @@ -328,9 +322,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arbitrary" @@ -488,9 +482,9 @@ checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -567,7 +561,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.35", + "rustix 0.38.37", "slab", "tracing", "windows-sys 0.59.0", @@ -660,9 +654,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "axum" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = 
"8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" dependencies = [ "async-trait", "axum-core", @@ -686,7 +680,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -694,9 +688,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" dependencies = [ "async-trait", "bytes", @@ -707,7 +701,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", "tracing", @@ -715,17 +709,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -794,12 +788,12 @@ dependencies = [ "int_to_bytes", "itertools 0.10.5", "kzg", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", "maplit", "merkle_proof", + "metrics", "oneshot_broadcast", "operation_pool", "parking_lot 0.12.3", @@ -870,9 +864,9 @@ dependencies = [ "fnv", "futures", "itertools 0.10.5", - "lighthouse_metrics", "lighthouse_network", "logging", + "metrics", "num_cpus", "parking_lot 0.12.3", "serde", @@ -1125,9 +1119,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -1208,9 +1202,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.15" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1334,9 +1328,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", @@ -1344,9 +1338,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", @@ -1357,9 +1351,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1409,8 +1403,8 @@ dependencies = [ "http_api", "http_metrics", "kzg", - "lighthouse_metrics", "lighthouse_network", + "metrics", "monitoring_api", "network", "operation_pool", @@ -1529,9 +1523,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = 
"0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -2049,13 +2043,14 @@ dependencies = [ "proc-macro2", "quote", "syn 2.0.77", + "unicode-xid", ] [[package]] name = "diesel" -version = "2.2.3" +version = "2.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e13bab2796f412722112327f3e575601a3e9cdcbe426f0d30dbf43f3f5dc71" +checksum = "158fe8e2e68695bd615d7e4f3227c0727b151330d3e253b525086c348d055d5e" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -2387,7 +2382,7 @@ dependencies = [ "bytes", "ed25519-dalek", "hex", - "k256 0.13.3", + "k256 0.13.4", "log", "rand", "serde", @@ -2397,11 +2392,11 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.77", @@ -2497,9 +2492,9 @@ dependencies = [ "ethereum_ssz_derive", "execution_layer", "futures", - "lighthouse_metrics", "logging", "merkle_proof", + "metrics", "parking_lot 0.12.3", "sensitive_url", "serde", @@ -2997,10 +2992,10 @@ dependencies = [ "jsonwebtoken", "keccak-hash", "kzg", - "lighthouse_metrics", "lighthouse_version", "logging", "lru", + "metrics", "parking_lot 0.12.3", "pretty_reqwest_error", "rand", @@ -3167,7 +3162,7 @@ checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -3198,7 +3193,7 @@ dependencies = [ "beacon_chain", "ethereum_ssz", "ethereum_ssz_derive", - "lighthouse_metrics", + 
"metrics", "proto_array", "slog", "state_processing", @@ -3325,7 +3320,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", ] @@ -3442,9 +3437,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "git-version" @@ -3871,11 +3866,11 @@ dependencies = [ "futures", "genesis", "hex", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "lru", + "metrics", "network", "operation_pool", "parking_lot 0.12.3", @@ -3905,11 +3900,11 @@ name = "http_metrics" version = "0.1.0" dependencies = [ "beacon_chain", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "malloc_utils", + "metrics", "reqwest", "serde", "slog", @@ -4011,9 +4006,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-util", @@ -4022,13 +4017,15 @@ dependencies = [ "hyper 1.4.1", "pin-project-lite", "tokio", + "tower 0.4.13", + "tower-service", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ 
-4267,9 +4264,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "is-terminal" @@ -4369,9 +4366,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa 0.16.9", @@ -4392,9 +4389,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -4825,7 +4822,7 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -4897,7 +4894,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -5035,11 +5032,11 @@ dependencies = [ "eth2_network_config", "ethereum_hashing", "futures", - "lighthouse_metrics", "lighthouse_network", "lighthouse_version", "logging", "malloc_utils", + "metrics", "sensitive_url", "serde", "serde_json", @@ -5056,13 +5053,6 @@ dependencies = [ "validator_manager", ] -[[package]] -name = "lighthouse_metrics" -version = "0.2.0" -dependencies = [ - "prometheus", -] - [[package]] name = "lighthouse_network" version = "0.2.0" @@ -5086,11 +5076,11 @@ dependencies = [ "itertools 0.10.5", "libp2p", "libp2p-mplex", - 
"lighthouse_metrics", "lighthouse_version", "logging", "lru", "lru_cache", + "metrics", "parking_lot 0.12.3", "prometheus-client", "quickcheck", @@ -5196,7 +5186,7 @@ name = "logging" version = "0.2.0" dependencies = [ "chrono", - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "serde", "serde_json", @@ -5252,7 +5242,7 @@ name = "malloc_utils" version = "0.1.0" dependencies = [ "libc", - "lighthouse_metrics", + "metrics", "parking_lot 0.12.3", "tikv-jemalloc-ctl", "tikv-jemallocator", @@ -5368,6 +5358,13 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "metrics" +version = "0.2.0" +dependencies = [ + "prometheus", +] + [[package]] name = "migrations_internals" version = "2.2.0" @@ -5434,15 +5431,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -5475,8 +5463,8 @@ name = "monitoring_api" version = "0.1.0" dependencies = [ "eth2", - "lighthouse_metrics", "lighthouse_version", + "metrics", "regex", "reqwest", "sensitive_url", @@ -5657,11 +5645,11 @@ dependencies = [ "igd-next", "itertools 0.10.5", "kzg", - "lighthouse_metrics", "lighthouse_network", "logging", "lru_cache", "matches", + "metrics", "operation_pool", "parking_lot 0.12.3", "rand", @@ -5931,9 +5919,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.3.1+3.3.1" +version = "300.3.2+3.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91" +checksum = "a211a18d945ef7e648cc6e0058f4c548ee46aab922ea203e0d30e966ea23647b" 
dependencies = [ "cc", ] @@ -5961,8 +5949,8 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.10.5", - "lighthouse_metrics", "maplit", + "metrics", "parking_lot 0.12.3", "rand", "rayon", @@ -6054,9 +6042,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -6101,7 +6089,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", "smallvec", "windows-targets 0.52.6", ] @@ -6171,9 +6159,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.11" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", "thiserror", @@ -6274,9 +6262,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -6287,15 +6275,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" +checksum = 
"df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] @@ -6310,7 +6298,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.35", + "rustix 0.38.37", "tracing", "windows-sys 0.59.0", ] @@ -6358,9 +6346,9 @@ dependencies = [ [[package]] name = "postgres-types" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02048d9e032fb3cc3413bbf7b83a15d84a5d419778e2628751896d856498eee9" +checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" dependencies = [ "bytes", "fallible-iterator", @@ -6384,9 +6372,9 @@ dependencies = [ [[package]] name = "pq-sys" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a24ff9e4cf6945c988f0db7005d87747bf72864965c3529d259ad155ac41d584" +checksum = "f6cc05d7ea95200187117196eee9edd0644424911821aeb28a18ce60ea0b8793" dependencies = [ "vcpkg", ] @@ -6451,7 +6439,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -6636,9 +6624,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2d2fb862b7ba45e615c1429def928f2e15f815bdf933b27a2d3824e224c1f46" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", "futures-io", @@ -6646,7 +6634,7 @@ dependencies = [ "quinn-proto", 
"quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "socket2 0.5.7", "thiserror", "tokio", @@ -6655,15 +6643,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.7" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0a9b3a42929fad8a7c3de7f86ce0814cfa893328157672680e9fb1145549c5" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls 0.23.13", "slab", "thiserror", "tinyvec", @@ -6672,15 +6660,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", "socket2 0.5.7", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6798,9 +6786,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.2" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58323dc32ea52a8ae105ff94bc0460c5d906307533ba3401aa63db3cbe491fe5" +checksum = "074373f3e7e5d27d8741d19512232adb47be8622d3daef3a45bcae72050c3d2a" dependencies = [ "libc", ] @@ -6816,18 +6804,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = 
[ "bitflags 2.6.0", ] @@ -7181,9 +7160,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.35" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -7213,21 +7192,21 @@ dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -7269,9 +7248,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -7361,11 +7340,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7454,9 +7433,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -7505,9 +7484,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -7524,9 +7503,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -7535,9 +7514,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -7681,9 +7660,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", "cfg-if", @@ -7791,12 +7770,12 @@ dependencies = [ "filesystem", "flate2", "libmdbx", - "lighthouse_metrics", "lmdb-rkv", "lmdb-rkv-sys", "logging", "lru", "maplit", + "metrics", "parking_lot 0.12.3", "rand", "rayon", @@ -7952,7 +7931,7 @@ dependencies = [ name = "slot_clock" version = "0.2.0" dependencies = [ - "lighthouse_metrics", 
+ "metrics", "parking_lot 0.12.3", "types", ] @@ -8080,8 +8059,8 @@ dependencies = [ "int_to_bytes", "integer-sqrt", "itertools 0.10.5", - "lighthouse_metrics", "merkle_proof", + "metrics", "rand", "rayon", "safe_arith", @@ -8121,8 +8100,8 @@ dependencies = [ "ethereum_ssz_derive", "itertools 0.10.5", "leveldb", - "lighthouse_metrics", "lru", + "metrics", "parking_lot 0.12.3", "safe_arith", "serde", @@ -8332,11 +8311,12 @@ version = "0.1.0" dependencies = [ "async-channel", "futures", - "lighthouse_metrics", "logging", + "metrics", "slog", "sloggers", "tokio", + "tracing", ] [[package]] @@ -8348,7 +8328,7 @@ dependencies = [ "cfg-if", "fastrand", "once_cell", - "rustix 0.38.35", + "rustix 0.38.37", "windows-sys 0.59.0", ] @@ -8378,7 +8358,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.35", + "rustix 0.38.37", "windows-sys 0.48.0", ] @@ -8417,18 +8397,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", @@ -8630,9 +8610,9 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03adcf0147e203b6032c0b2d30be1415ba03bc348901f3ff1cc0df6a733e60c3" +checksum = 
"3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" dependencies = [ "async-trait", "byteorder", @@ -8677,9 +8657,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -8689,9 +8669,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -8720,7 +8700,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -8745,9 +8725,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -8769,6 +8749,21 @@ dependencies = [ "tokio", "tower-layer", "tower-service", +] + +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tower-layer", + "tower-service", "tracing", ] @@ -9042,15 +9037,15 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" 
+version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -9063,9 +9058,9 @@ checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -9170,11 +9165,11 @@ dependencies = [ "hyper 1.4.1", "itertools 0.10.5", "libsecp256k1", - "lighthouse_metrics", "lighthouse_version", "lockfile", "logging", "malloc_utils", + "metrics", "monitoring_api", "parking_lot 0.12.3", "rand", @@ -9227,6 +9222,7 @@ dependencies = [ "account_utils", "clap", "clap_utils", + "derivative", "environment", "eth2", "eth2_network_config", @@ -9338,7 +9334,7 @@ dependencies = [ "bytes", "eth2", "headers", - "lighthouse_metrics", + "metrics", "safe_arith", "serde", "serde_array_query", @@ -9544,11 +9540,11 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "whoami" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.4.1", + "redox_syscall 0.5.4", 
"wasite", "web-sys", ] @@ -9943,9 +9939,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "xmltree" diff --git a/Cargo.toml b/Cargo.toml index 94ac8e13ff..7094ff6077 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", - "common/lighthouse_metrics", + "common/metrics", "common/lighthouse_version", "common/lockfile", "common/logging", @@ -141,6 +141,7 @@ milhouse = "0.3" num_cpus = "1" parking_lot = "0.12" paste = "1" +prometheus = "0.13" quickcheck = "1" quickcheck_macros = "1" quote = "1" @@ -213,7 +214,7 @@ gossipsub = { path = "beacon_node/lighthouse_network/gossipsub/" } http_api = { path = "beacon_node/http_api" } int_to_bytes = { path = "consensus/int_to_bytes" } kzg = { path = "crypto/kzg" } -lighthouse_metrics = { path = "common/lighthouse_metrics" } +metrics = { path = "common/metrics" } lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_version = { path = "common/lighthouse_version" } lockfile = { path = "common/lockfile" } diff --git a/Makefile b/Makefile index 32665d43ae..fd7d45f26a 100644 --- a/Makefile +++ b/Makefile @@ -183,7 +183,7 @@ test-exec-engine: # test vectors. test: test-release -# Updates the CLI help text pages in the Lighthouse book, building with Docker. +# Updates the CLI help text pages in the Lighthouse book, building with Docker (primarily for Windows users). 
cli: docker run --rm --user=root \ -v ${PWD}:/home/runner/actions-runner/lighthouse sigmaprime/github-runner \ diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 0dc941df90..b0fa013180 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -42,7 +42,7 @@ hex = { workspace = true } int_to_bytes = { workspace = true } itertools = { workspace = true } kzg = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } lru = { workspace = true } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5d7d7f0e06..f8dfbc5515 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,7 +22,7 @@ pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; use crate::data_availability_checker::{ Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, - DataColumnsToPublish, + DataColumnReconstructionResult, }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::early_attester_cache::EarlyAttesterCache; @@ -3015,13 +3015,7 @@ impl BeaconChain { self: &Arc, data_columns: Vec>, publish_fn: impl FnOnce() -> Result<(), BlockError>, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { let Ok((slot, block_root)) = data_columns .iter() .map(|c| (c.slot(), c.block_root())) @@ -3051,7 +3045,7 @@ impl BeaconChain { publish_fn, ) .await; - self.remove_notified_custody_columns(&block_root, r) + self.remove_notified(&block_root, r) } /// Cache the blobs in the processing cache, process it, then evict it from the cache if it was @@ -3110,13 +3104,7 @@ impl BeaconChain { pub async fn process_rpc_custody_columns( self: &Arc, custody_columns: 
DataColumnSidecarList, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { let Ok((slot, block_root)) = custody_columns .iter() .map(|c| (c.slot(), c.block_root())) @@ -3154,7 +3142,67 @@ impl BeaconChain { let r = self .check_rpc_custody_columns_availability_and_import(slot, block_root, custody_columns) .await; - self.remove_notified_custody_columns(&block_root, r) + self.remove_notified(&block_root, r) + } + + pub async fn reconstruct_data_columns( + self: &Arc, + block_root: Hash256, + ) -> Result< + Option<( + AvailabilityProcessingStatus, + DataColumnSidecarList, + )>, + BlockError, + > { + // As of now we only reconstruct data columns on supernodes, so if the block is already + // available on a supernode, there's no need to reconstruct as the node must already have + // all columns. + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Ok(None); + } + + let data_availability_checker = self.data_availability_checker.clone(); + + let result = self + .task_executor + .spawn_blocking_handle( + move || data_availability_checker.reconstruct_data_columns(&block_root), + "reconstruct_data_columns", + ) + .ok_or(BeaconChainError::RuntimeShutdown)? + .await + .map_err(BeaconChainError::TokioJoin)??; + + match result { + DataColumnReconstructionResult::Success((availability, data_columns_to_publish)) => { + let Some(slot) = data_columns_to_publish.first().map(|d| d.slot()) else { + // This should be unreachable because empty result would return `RecoveredColumnsNotImported` instead of success. 
+ return Ok(None); + }; + + let r = self + .process_availability(slot, availability, || Ok(())) + .await; + self.remove_notified(&block_root, r) + .map(|availability_processing_status| { + Some((availability_processing_status, data_columns_to_publish)) + }) + } + DataColumnReconstructionResult::NotStarted(reason) + | DataColumnReconstructionResult::RecoveredColumnsNotImported(reason) => { + // We use metric here because logging this would be *very* noisy. + metrics::inc_counter_vec( + &metrics::KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL, + &[reason], + ); + Ok(None) + } + } } /// Remove any block components from the *processing cache* if we no longer require them. If the @@ -3172,23 +3220,6 @@ impl BeaconChain { r } - /// Remove any block components from the *processing cache* if we no longer require them. If the - /// block was imported full or erred, we no longer require them. - fn remove_notified_custody_columns

( - &self, - block_root: &Hash256, - r: Result<(AvailabilityProcessingStatus, P), BlockError>, - ) -> Result<(AvailabilityProcessingStatus, P), BlockError> { - let has_missing_components = matches!( - r, - Ok((AvailabilityProcessingStatus::MissingComponents(_, _), _)) - ); - if !has_missing_components { - self.reqresp_pre_import_cache.write().remove(block_root); - } - r - } - /// Wraps `process_block` in logic to cache the block's commitments in the processing cache /// and evict if the block was imported or errored. pub async fn process_block_with_early_caching>( @@ -3444,26 +3475,21 @@ impl BeaconChain { block_root: Hash256, data_columns: Vec>, publish_fn: impl FnOnce() -> Result<(), BlockError>, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { if let Some(slasher) = self.slasher.as_ref() { for data_colum in &data_columns { slasher.accept_block_header(data_colum.signed_block_header()); } } - let (availability, data_columns_to_publish) = self - .data_availability_checker - .put_gossip_data_columns(slot, block_root, data_columns)?; + let availability = self.data_availability_checker.put_gossip_data_columns( + slot, + block_root, + data_columns, + )?; self.process_availability(slot, availability, publish_fn) .await - .map(|result| (result, data_columns_to_publish)) } /// Checks if the provided blobs can make any cached blocks available, and imports immediately @@ -3513,13 +3539,7 @@ impl BeaconChain { slot: Slot, block_root: Hash256, custody_columns: DataColumnSidecarList, - ) -> Result< - ( - AvailabilityProcessingStatus, - DataColumnsToPublish, - ), - BlockError, - > { + ) -> Result { // Need to scope this to ensure the lock is dropped before calling `process_availability` // Even an explicit drop is not enough to convince the borrow checker. 
{ @@ -3544,16 +3564,14 @@ impl BeaconChain { // This slot value is purely informative for the consumers of // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. - let (availability, data_columns_to_publish) = - self.data_availability_checker.put_rpc_custody_columns( - block_root, - slot.epoch(T::EthSpec::slots_per_epoch()), - custody_columns, - )?; + let availability = self.data_availability_checker.put_rpc_custody_columns( + block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + custody_columns, + )?; self.process_availability(slot, availability, || Ok(())) .await - .map(|result| (result, data_columns_to_publish)) } /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a8233f170f..527462ab64 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -55,8 +55,8 @@ use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlo use crate::data_column_verification::GossipDataColumnError; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ - is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, - AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, + validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, + NotifyExecutionLayer, PayloadNotifier, }; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_block_producers::SeenBlock; @@ -70,11 +70,11 @@ use derivative::Derivative; use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; -use lighthouse_metrics::TryExt; +use metrics::TryExt; use parking_lot::RwLockReadGuard; use proto_array::Block as ProtoBlock; use 
safe_arith::ArithError; -use slog::{debug, error, warn, Logger}; +use slog::{debug, error, Logger}; use slot_clock::SlotClock; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -95,9 +95,9 @@ use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, - BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - FullPayload, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, + Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, }; pub const POS_PANDA_BANNER: &str = r#" @@ -1388,28 +1388,6 @@ impl ExecutionPendingBlock { } let payload_verification_status = payload_notifier.notify_new_payload().await?; - // If the payload did not validate or invalidate the block, check to see if this block is - // valid for optimistic import. - if payload_verification_status.is_optimistic() { - let block_hash_opt = block - .message() - .body() - .execution_payload() - .map(|full_payload| full_payload.block_hash()); - - // Ensure the block is a candidate for optimistic import. - if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await? 
- { - warn!( - chain.log, - "Rejecting optimistic block"; - "block_hash" => ?block_hash_opt, - "msg" => "the execution engine is not synced" - ); - return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()); - } - } - Ok(PayloadVerificationOutcome { payload_verification_status, is_valid_merge_transition_block, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 001dbf0080..5f1e94fc8c 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -984,6 +984,7 @@ where store, self.import_all_data_columns, self.spec, + log.new(o!("service" => "data_availability_checker")), ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 395f40c5db..047764d705 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -2,10 +2,12 @@ use crate::blob_verification::{verify_kzg_for_blob_list, GossipVerifiedBlob, Kzg use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock, }; -use crate::data_availability_checker::overflow_lru_cache::DataAvailabilityCheckerInner; -use crate::{BeaconChain, BeaconChainTypes, BeaconStore}; +use crate::data_availability_checker::overflow_lru_cache::{ + DataAvailabilityCheckerInner, ReconstructColumnsDecision, +}; +use crate::{metrics, BeaconChain, BeaconChainTypes, BeaconStore}; use kzg::Kzg; -use slog::{debug, error}; +use slog::{debug, error, Logger}; use slot_clock::SlotClock; use std::fmt; use std::fmt::Debug; @@ -27,11 +29,12 @@ use crate::data_column_verification::{ verify_kzg_for_data_column, verify_kzg_for_data_column_list, CustodyDataColumn, GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, }; +use crate::metrics::{ + 
KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, +}; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; -pub use self::overflow_lru_cache::DataColumnsToPublish; - /// The LRU Cache stores `PendingComponents` which can store up to /// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So /// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this @@ -71,6 +74,16 @@ pub struct DataAvailabilityChecker { slot_clock: T::SlotClock, kzg: Arc, spec: Arc, + log: Logger, +} + +pub type AvailabilityAndReconstructedColumns = (Availability, DataColumnSidecarList); + +#[derive(Debug)] +pub enum DataColumnReconstructionResult { + Success(AvailabilityAndReconstructedColumns), + NotStarted(&'static str), + RecoveredColumnsNotImported(&'static str), } /// This type is returned after adding a block / blob to the `DataAvailabilityChecker`. @@ -101,6 +114,7 @@ impl DataAvailabilityChecker { store: BeaconStore, import_all_data_columns: bool, spec: Arc, + log: Logger, ) -> Result { let custody_subnet_count = if import_all_data_columns { spec.data_column_sidecar_subnet_count as usize @@ -124,6 +138,7 @@ impl DataAvailabilityChecker { slot_clock, kzg, spec, + log, }) } @@ -205,7 +220,7 @@ impl DataAvailabilityChecker { .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache - .put_kzg_verified_blobs(block_root, epoch, verified_blobs) + .put_kzg_verified_blobs(block_root, epoch, verified_blobs, &self.log) } /// Put a list of custody columns received via RPC into the availability cache. 
This performs KZG @@ -216,8 +231,7 @@ impl DataAvailabilityChecker { block_root: Hash256, epoch: Epoch, custody_columns: DataColumnSidecarList, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + ) -> Result, AvailabilityCheckError> { // TODO(das): report which column is invalid for proper peer scoring // TODO(das): batch KZG verification here, but fallback into checking each column // individually to report which column(s) are invalid. @@ -233,10 +247,10 @@ impl DataAvailabilityChecker { .collect::, AvailabilityCheckError>>()?; self.availability_cache.put_kzg_verified_data_columns( - &self.kzg, block_root, epoch, verified_custody_columns, + &self.log, ) } @@ -253,6 +267,7 @@ impl DataAvailabilityChecker { gossip_blob.block_root(), gossip_blob.epoch(), vec![gossip_blob.into_inner()], + &self.log, ) } @@ -267,8 +282,7 @@ impl DataAvailabilityChecker { slot: Slot, block_root: Hash256, gossip_data_columns: Vec>, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + ) -> Result, AvailabilityCheckError> { let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); let custody_columns = gossip_data_columns @@ -277,10 +291,10 @@ impl DataAvailabilityChecker { .collect::>(); self.availability_cache.put_kzg_verified_data_columns( - &self.kzg, block_root, epoch, custody_columns, + &self.log, ) } @@ -291,7 +305,7 @@ impl DataAvailabilityChecker { executed_block: AvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError> { self.availability_cache - .put_pending_executed_block(executed_block) + .put_pending_executed_block(executed_block, &self.log) } pub fn remove_pending_components(&self, block_root: Hash256) { @@ -511,6 +525,92 @@ impl DataAvailabilityChecker { block_cache_size: self.availability_cache.block_cache_size(), } } + + pub fn reconstruct_data_columns( + &self, + block_root: &Hash256, + ) -> Result, AvailabilityCheckError> { + let pending_components = match self + .availability_cache + 
.check_and_set_reconstruction_started(block_root) + { + ReconstructColumnsDecision::Yes(pending_components) => pending_components, + ReconstructColumnsDecision::No(reason) => { + return Ok(DataColumnReconstructionResult::NotStarted(reason)); + } + }; + + metrics::inc_counter(&KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS); + let timer = metrics::start_timer(&metrics::DATA_AVAILABILITY_RECONSTRUCTION_TIME); + + let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( + &self.kzg, + &pending_components.verified_data_columns, + &self.spec, + ) + .map_err(|e| { + error!( + self.log, + "Error reconstructing data columns"; + "block_root" => ?block_root, + "error" => ?e + ); + self.availability_cache + .handle_reconstruction_failure(block_root); + metrics::inc_counter(&KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES); + AvailabilityCheckError::ReconstructColumnsError(e) + })?; + + // Check indices from cache again to make sure we don't publish components we've already received. + let Some(existing_column_indices) = self.cached_data_column_indexes(block_root) else { + return Ok(DataColumnReconstructionResult::RecoveredColumnsNotImported( + "block already imported", + )); + }; + + let data_columns_to_publish = all_data_columns + .into_iter() + .filter(|d| !existing_column_indices.contains(&d.index())) + .collect::>(); + + let Some(slot) = data_columns_to_publish + .first() + .map(|d| d.as_data_column().slot()) + else { + return Ok(DataColumnReconstructionResult::RecoveredColumnsNotImported( + "No new columns to import and publish", + )); + }; + + metrics::stop_timer(timer); + metrics::inc_counter_by( + &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, + data_columns_to_publish.len() as u64, + ); + + debug!(self.log, "Reconstructed columns"; + "count" => data_columns_to_publish.len(), + "block_root" => ?block_root, + "slot" => slot, + ); + + self.availability_cache + .put_kzg_verified_data_columns( + *block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + 
data_columns_to_publish.clone(), + &self.log, + ) + .map(|availability| { + DataColumnReconstructionResult::Success(( + availability, + data_columns_to_publish + .into_iter() + .map(|d| d.clone_arc()) + .collect::>(), + )) + }) + } } /// Helper struct to group data availability checker metrics. diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 8f91bf34fc..6d4636e8ed 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -6,23 +6,19 @@ use crate::block_verification_types::{ }; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; use crate::data_column_verification::KzgVerifiedCustodyDataColumn; -use crate::metrics; use crate::BeaconChainTypes; -use kzg::Kzg; use lru::LruCache; use parking_lot::RwLock; +use slog::{debug, Logger}; use ssz_types::{FixedVector, VariableList}; -use std::collections::HashSet; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, + Hash256, SignedBeaconBlock, }; -pub type DataColumnsToPublish = Option>; - /// This represents the components of a partially available block /// /// The blobs are all gossip and kzg verified. @@ -95,7 +91,7 @@ impl PendingComponents { /// block. /// /// This corresponds to the number of commitments that are present in a block. 
- pub fn num_expected_blobs(&self) -> Option { + pub fn block_kzg_commitments_count(&self) -> Option { self.get_cached_block() .as_ref() .map(|b| b.get_commitments().len()) @@ -203,21 +199,61 @@ impl PendingComponents { /// /// Returns `true` if both the block exists and the number of received blobs / custody columns /// matches the number of expected blobs / custody columns. - pub fn is_available(&self, block_import_requirement: &BlockImportRequirement) -> bool { + pub fn is_available( + &self, + block_import_requirement: &BlockImportRequirement, + log: &Logger, + ) -> bool { + let block_kzg_commitments_count_opt = self.block_kzg_commitments_count(); + match block_import_requirement { - BlockImportRequirement::AllBlobs => self - .num_expected_blobs() - .map_or(false, |num_expected_blobs| { - num_expected_blobs == self.num_received_blobs() - }), + BlockImportRequirement::AllBlobs => { + let received_blobs = self.num_received_blobs(); + let expected_blobs_msg = block_kzg_commitments_count_opt + .as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); + + debug!(log, + "Component(s) added to data availability checker"; + "block_root" => ?self.block_root, + "received_block" => block_kzg_commitments_count_opt.is_some(), + "received_blobs" => received_blobs, + "expected_blobs" => expected_blobs_msg, + ); + + block_kzg_commitments_count_opt.map_or(false, |num_expected_blobs| { + num_expected_blobs == received_blobs + }) + } BlockImportRequirement::ColumnSampling(num_expected_columns) => { - let num_received_data_columns = self.num_received_data_columns(); // No data columns when there are 0 blobs - self.num_expected_blobs() - .map_or(false, |num_expected_blobs| { - num_expected_blobs == 0 - || *num_expected_columns == num_received_data_columns - }) + let expected_columns_opt = block_kzg_commitments_count_opt.map(|blob_count| { + if blob_count > 0 { + *num_expected_columns + } else { + 0 + } + }); + + let expected_columns_msg = expected_columns_opt + 
.as_ref() + .map(|num| num.to_string()) + .unwrap_or("unknown".to_string()); + + let num_received_columns = self.num_received_data_columns(); + + debug!(log, + "Component(s) added to data availability checker"; + "block_root" => ?self.block_root, + "received_block" => block_kzg_commitments_count_opt.is_some(), + "received_columns" => num_received_columns, + "expected_columns" => expected_columns_msg, + ); + + expected_columns_opt.map_or(false, |num_expected_columns| { + num_expected_columns == num_received_columns + }) } } } @@ -311,10 +347,6 @@ impl PendingComponents { ))) } - pub fn reconstruction_started(&mut self) { - self.reconstruction_started = true; - } - /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob. pub fn epoch(&self) -> Option { self.executed_block @@ -358,6 +390,15 @@ pub struct DataAvailabilityCheckerInner { spec: Arc, } +// This enum is only used internally within the crate in the reconstruction function to improve +// readability, so it's OK to not box the variant value, and it shouldn't impact memory much with +// the current usage, as it's deconstructed immediately. 
+#[allow(clippy::large_enum_variant)] +pub(crate) enum ReconstructColumnsDecision { + Yes(PendingComponents), + No(&'static str), +} + impl DataAvailabilityCheckerInner { pub fn new( capacity: NonZeroUsize, @@ -448,33 +489,12 @@ impl DataAvailabilityCheckerInner { } } - /// Potentially trigger reconstruction if: - /// - Our custody requirement is all columns - /// - We >= 50% of columns, but not all columns - fn should_reconstruct( - &self, - block_import_requirement: &BlockImportRequirement, - pending_components: &PendingComponents, - ) -> bool { - let BlockImportRequirement::ColumnSampling(num_expected_columns) = block_import_requirement - else { - return false; - }; - - let num_of_columns = self.spec.number_of_columns; - let has_missing_columns = pending_components.verified_data_columns.len() < num_of_columns; - - has_missing_columns - && !pending_components.reconstruction_started - && *num_expected_columns == num_of_columns - && pending_components.verified_data_columns.len() >= num_of_columns / 2 - } - pub fn put_kzg_verified_blobs>>( &self, block_root: Hash256, epoch: Epoch, kzg_verified_blobs: I, + log: &Logger, ) -> Result, AvailabilityCheckError> { let mut fixed_blobs = FixedVector::default(); @@ -496,7 +516,7 @@ impl DataAvailabilityCheckerInner { pending_components.merge_blobs(fixed_blobs); let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); @@ -514,12 +534,11 @@ impl DataAvailabilityCheckerInner { I: IntoIterator>, >( &self, - kzg: &Kzg, block_root: Hash256, epoch: Epoch, kzg_verified_data_columns: I, - ) -> Result<(Availability, DataColumnsToPublish), AvailabilityCheckError> - { + log: &Logger, + ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); // Grab 
existing entry or create a new entry. @@ -533,65 +552,67 @@ impl DataAvailabilityCheckerInner { let block_import_requirement = self.block_import_requirement(epoch)?; - // Potentially trigger reconstruction if: - // - Our custody requirement is all columns - // - We >= 50% of columns - let data_columns_to_publish = - if self.should_reconstruct(&block_import_requirement, &pending_components) { - pending_components.reconstruction_started(); - - let timer = metrics::start_timer(&metrics::DATA_AVAILABILITY_RECONSTRUCTION_TIME); - - let existing_column_indices = pending_components - .verified_data_columns - .iter() - .map(|d| d.index()) - .collect::>(); - - // Will only return an error if: - // - < 50% of columns - // - There are duplicates - let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( - kzg, - pending_components.verified_data_columns.as_slice(), - &self.spec, - ) - .map_err(AvailabilityCheckError::ReconstructColumnsError)?; - - let data_columns_to_publish = all_data_columns - .iter() - .filter(|d| !existing_column_indices.contains(&d.index())) - .map(|d| d.clone_arc()) - .collect::>(); - - pending_components.verified_data_columns = all_data_columns; - - metrics::stop_timer(timer); - metrics::inc_counter_by( - &metrics::DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS, - data_columns_to_publish.len() as u64, - ); - - Some(data_columns_to_publish) - } else { - None - }; - - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); - pending_components - .make_available(block_import_requirement, &self.spec, |diet_block| { - self.state_cache.recover_pending_executed_block(diet_block) - }) - .map(|availability| (availability, data_columns_to_publish)) + pending_components.make_available(block_import_requirement, &self.spec, |diet_block| { + 
self.state_cache.recover_pending_executed_block(diet_block) + }) } else { write_lock.put(block_root, pending_components); - Ok(( - Availability::MissingComponents(block_root), - data_columns_to_publish, - )) + Ok(Availability::MissingComponents(block_root)) + } + } + + /// Check whether data column reconstruction should be attempted. + /// + /// Potentially trigger reconstruction if: + /// - Our custody requirement is all columns (supernode), and we haven't got all columns + /// - We have >= 50% of columns, but not all columns + /// - Reconstruction hasn't been started for the block + /// + /// If reconstruction is required, returns `PendingComponents` which contains the + /// components to be used as inputs to reconstruction, otherwise returns a `reason`. + pub fn check_and_set_reconstruction_started( + &self, + block_root: &Hash256, + ) -> ReconstructColumnsDecision { + let mut write_lock = self.critical.write(); + let Some(pending_components) = write_lock.get_mut(block_root) else { + // Block may have been imported as it does not exist in availability cache. + return ReconstructColumnsDecision::No("block already imported"); + }; + + // If we're sampling all columns, it means we must be custodying all columns. 
+ let custody_column_count = self.sampling_column_count(); + let total_column_count = self.spec.number_of_columns; + let received_column_count = pending_components.verified_data_columns.len(); + + if pending_components.reconstruction_started { + return ReconstructColumnsDecision::No("already started"); + } + if custody_column_count != total_column_count { + return ReconstructColumnsDecision::No("not required for full node"); + } + if received_column_count == self.spec.number_of_columns { + return ReconstructColumnsDecision::No("all columns received"); + } + if received_column_count < total_column_count / 2 { + return ReconstructColumnsDecision::No("not enough columns"); + } + + pending_components.reconstruction_started = true; + ReconstructColumnsDecision::Yes(pending_components.clone()) + } + + /// This could mean some invalid data columns made it through to the `DataAvailabilityChecker`. + /// In this case, we remove all data columns in `PendingComponents`, reset reconstruction + /// status so that we can attempt to retrieve columns from peers again. + pub fn handle_reconstruction_failure(&self, block_root: &Hash256) { + if let Some(pending_components_mut) = self.critical.write().get_mut(block_root) { + pending_components_mut.verified_data_columns = vec![]; + pending_components_mut.reconstruction_started = false; } } @@ -600,6 +621,7 @@ impl DataAvailabilityCheckerInner { pub fn put_pending_executed_block( &self, executed_block: AvailabilityPendingExecutedBlock, + log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); let block_root = executed_block.import_data.block_root; @@ -621,7 +643,7 @@ impl DataAvailabilityCheckerInner { // Check if we have all components and entire set is consistent. 
let block_import_requirement = self.block_import_requirement(epoch)?; - if pending_components.is_available(&block_import_requirement) { + if pending_components.is_available(&block_import_requirement, log) { write_lock.put(block_root, pending_components.clone()); // No need to hold the write lock anymore drop(write_lock); @@ -919,7 +941,7 @@ mod test { ); assert!(cache.critical.read().is_empty(), "cache should be empty"); let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); if blobs_expected == 0 { assert!( @@ -958,7 +980,7 @@ mod test { for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone()) + .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); if blob_index == blobs_expected - 1 { assert!(matches!(availability, Availability::Available(_))); @@ -985,7 +1007,7 @@ mod test { for gossip_blob in blobs { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone()) + .put_kzg_verified_blobs(root, epoch, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); assert_eq!( availability, @@ -995,7 +1017,7 @@ mod test { assert_eq!(cache.critical.read().len(), 1); } let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); assert!( matches!(availability, Availability::Available(_)), @@ -1063,7 +1085,7 @@ mod test { // put the block in the cache let availability = cache - .put_pending_executed_block(pending_block) + .put_pending_executed_block(pending_block, harness.logger()) .expect("should put block"); // grab the diet block from the cache for later testing diff 
--git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 44873fab4a..a4e83b2751 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -313,10 +313,7 @@ impl KzgVerifiedCustodyDataColumn { kzg: &Kzg, partial_set_of_columns: &[Self], spec: &ChainSpec, - ) -> Result, KzgError> { - // Will only return an error if: - // - < 50% of columns - // - There are duplicates + ) -> Result>, KzgError> { let all_data_columns = reconstruct_data_columns( kzg, &partial_set_of_columns @@ -328,10 +325,8 @@ impl KzgVerifiedCustodyDataColumn { Ok(all_data_columns .into_iter() - .map(|d| { - KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { - data: d, - }) + .map(|data| { + KzgVerifiedCustodyDataColumn::from_asserted_custody(KzgVerifiedDataColumn { data }) }) .collect::>()) } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index b9b98bfbc0..f2420eea0d 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -277,9 +277,7 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } .into()), None => { - if allow_optimistic_import == AllowOptimisticImport::Yes - && is_optimistic_candidate_block(chain, block.slot(), block.parent_root()).await? - { + if allow_optimistic_import == AllowOptimisticImport::Yes { debug!( chain.log, "Optimistically importing merge transition block"; @@ -297,36 +295,6 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( } } -/// Check to see if a block with the given parameters is valid to be imported optimistically. 
-pub async fn is_optimistic_candidate_block( - chain: &Arc>, - block_slot: Slot, - block_parent_root: Hash256, -) -> Result { - let current_slot = chain.slot()?; - let inner_chain = chain.clone(); - - // Use a blocking task to check if the block is an optimistic candidate. Interacting - // with the `fork_choice` lock in an async task can block the core executor. - chain - .spawn_blocking_handle( - move || { - inner_chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_candidate_block( - current_slot, - block_slot, - &block_parent_root, - &inner_chain.spec, - ) - }, - "validate_merge_block_optimistic_candidate", - ) - .await? - .map_err(BeaconChainError::from) -} - /// Validate the gossip block's execution_payload according to the checks described here: /// https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/p2p-interface.md#beacon_block pub fn validate_execution_payload_for_gossip( diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index ca015d0365..e0ddd8c882 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,25 +1,19 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; -use eth2::types::light_client_update::CurrentSyncCommitteeProofLen; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; use slog::{debug, Logger}; use ssz::Decode; -use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use store::DBColumn; use store::KeyValueStore; use tree_hash::TreeHash; -use types::light_client_update::{ - FinalizedRootProofLen, NextSyncCommitteeProofLen, CURRENT_SYNC_COMMITTEE_INDEX, - FINALIZED_ROOT_INDEX, NEXT_SYNC_COMMITTEE_INDEX, -}; use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, EthSpec, ForkName, Hash256, LightClientBootstrap, 
LightClientFinalityUpdate, LightClientOptimisticUpdate, - LightClientUpdate, Slot, SyncAggregate, SyncCommittee, + LightClientUpdate, MerkleProof, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the @@ -69,17 +63,14 @@ impl LightClientServerCache { block_post_state: &mut BeaconState, ) -> Result<(), BeaconChainError> { let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES); - + let fork_name = spec.fork_name_at_slot::(block.slot()); // Only post-altair - if spec.fork_name_at_slot::(block.slot()) == ForkName::Base { - return Ok(()); + if fork_name.altair_enabled() { + // Persist in memory cache for a descendent block + let cached_data = LightClientCachedData::from_state(block_post_state)?; + self.prev_block_cache.lock().put(block_root, cached_data); } - // Persist in memory cache for a descendent block - - let cached_data = LightClientCachedData::from_state(block_post_state)?; - self.prev_block_cache.lock().put(block_root, cached_data); - Ok(()) } @@ -413,16 +404,12 @@ impl Default for LightClientServerCache { } } -type FinalityBranch = FixedVector; -type NextSyncCommitteeBranch = FixedVector; -type CurrentSyncCommitteeBranch = FixedVector; - #[derive(Clone)] struct LightClientCachedData { finalized_checkpoint: Checkpoint, - finality_branch: FinalityBranch, - next_sync_committee_branch: NextSyncCommitteeBranch, - current_sync_committee_branch: CurrentSyncCommitteeBranch, + finality_branch: MerkleProof, + next_sync_committee_branch: MerkleProof, + current_sync_committee_branch: MerkleProof, next_sync_committee: Arc>, current_sync_committee: Arc>, finalized_block_root: Hash256, @@ -430,17 +417,18 @@ struct LightClientCachedData { impl LightClientCachedData { fn from_state(state: &mut BeaconState) -> Result { + let (finality_branch, next_sync_committee_branch, current_sync_committee_branch) = ( + state.compute_finalized_root_proof()?, + 
state.compute_current_sync_committee_proof()?, + state.compute_next_sync_committee_proof()?, + ); Ok(Self { finalized_checkpoint: state.finalized_checkpoint(), - finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), + finality_branch, next_sync_committee: state.next_sync_committee()?.clone(), current_sync_committee: state.current_sync_committee()?.clone(), - next_sync_committee_branch: state - .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? - .into(), - current_sync_committee_branch: state - .compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)? - .into(), + next_sync_committee_branch, + current_sync_committee_branch, finalized_block_root: state.finalized_checkpoint().root, }) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index f15b46fc4b..f73775d678 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -2,7 +2,7 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use bls::FixedBytesExtended; -pub use lighthouse_metrics::*; +pub use metrics::*; use slot_clock::SlotClock; use std::sync::LazyLock; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; @@ -1887,6 +1887,31 @@ pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> ) }); +pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "kzg_data_column_reconstruction_attempts", + "Count of times data column reconstruction has been attempted", + ) + }); + +pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> = + LazyLock::new(|| { + try_create_int_counter( + "kzg_data_column_reconstruction_failures", + "Count of times data column reconstruction has failed", + ) + }); + +pub static KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + 
"kzg_data_column_reconstruction_incomplete_total", + "Count of times data column reconstruction attempts did not result in an import", + &["reason"], + ) + }); + /* * light_client server metrics */ diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index e1f2cbb284..0b121356b9 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -86,7 +86,7 @@ async fn produces_attestations_from_attestation_simulator_service() { let expected_miss_metrics_count = 0; let expected_hit_metrics_count = num_blocks_produced - UNAGGREGATED_ATTESTATION_LAG_SLOTS as u64; - lighthouse_metrics::gather().iter().for_each(|mf| { + metrics::gather().iter().for_each(|mf| { if hit_prometheus_metrics.contains(&mf.get_name()) { assert_eq!( mf.get_metric()[0].get_counter().get_value() as u64, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index dd195048e8..1325875a27 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,20 +1,14 @@ #![cfg(not(debug_assertions))] -use beacon_chain::otb_verification_service::{ - load_optimistic_transition_blocks, validate_optimistic_transition_blocks, - OptimisticTransitionBlock, -}; use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer, OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, }; use execution_layer::{ json_structures::{JsonForkchoiceStateV1, JsonPayloadAttributes, JsonPayloadAttributesV1}, - test_utils::ExecutionBlockGenerator, ExecutionLayer, ForkchoiceState, PayloadAttributes, }; 
use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; @@ -1270,552 +1264,6 @@ async fn attesting_to_optimistic_head() { get_aggregated_by_slot_and_root().unwrap(); } -/// A helper struct to build out a chain of some configurable length which undergoes the merge -/// transition. -struct OptimisticTransitionSetup { - blocks: Vec>>, - execution_block_generator: ExecutionBlockGenerator, -} - -impl OptimisticTransitionSetup { - async fn new(num_blocks: usize, ttd: u64) -> Self { - let mut spec = E::default_spec(); - spec.terminal_total_difficulty = Uint256::from(ttd); - let mut rig = InvalidPayloadRig::new_with_spec(spec).enable_attestations(); - rig.move_to_terminal_block(); - - let mut blocks = Vec::with_capacity(num_blocks); - for _ in 0..num_blocks { - let root = rig.import_block(Payload::Valid).await; - let block = rig.harness.chain.get_block(&root).await.unwrap().unwrap(); - blocks.push(Arc::new(block)); - } - - let execution_block_generator = rig - .harness - .mock_execution_layer - .as_ref() - .unwrap() - .server - .execution_block_generator() - .clone(); - - Self { - blocks, - execution_block_generator, - } - } -} - -/// Build a chain which has optimistically imported a transition block. -/// -/// The initial chain will be built with respect to `block_ttd`, whilst the `rig` which imports the -/// chain will operate with respect to `rig_ttd`. This allows for testing mismatched TTDs. -async fn build_optimistic_chain( - block_ttd: u64, - rig_ttd: u64, - num_blocks: usize, -) -> InvalidPayloadRig { - let OptimisticTransitionSetup { - blocks, - execution_block_generator, - } = OptimisticTransitionSetup::new(num_blocks, block_ttd).await; - // Build a brand-new testing harness. We will apply the blocks from the previous harness to - // this one. 
- let mut spec = E::default_spec(); - spec.terminal_total_difficulty = Uint256::from(rig_ttd); - let rig = InvalidPayloadRig::new_with_spec(spec); - - let spec = &rig.harness.chain.spec; - let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); - - // Ensure all the execution blocks from the first rig are available in the second rig. - *mock_execution_layer.server.execution_block_generator() = execution_block_generator; - - // Make the execution layer respond `SYNCING` to all `newPayload` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_new_payload(true); - // Make the execution layer respond `SYNCING` to all `forkchoiceUpdated` requests. - mock_execution_layer - .server - .all_payloads_syncing_on_forkchoice_updated(); - // Make the execution layer respond `None` to all `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - let current_slot = std::cmp::max( - blocks[0].slot() + spec.safe_slots_to_import_optimistically, - num_blocks.into(), - ); - rig.harness.set_current_slot(current_slot); - - for block in blocks { - rig.harness - .chain - .process_block( - block.canonical_root(), - block, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ) - .await - .unwrap(); - } - - rig.harness.chain.recompute_head_at_current_slot().await; - - // Make the execution layer respond normally to `getBlockByHash` requests. - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - // Perform some sanity checks to ensure that the transition happened exactly where we expected. 
- let pre_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(0), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let pre_transition_block = rig - .harness - .chain - .get_block(&pre_transition_block_root) - .await - .unwrap() - .unwrap(); - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - assert_eq!( - pre_transition_block_root, - post_transition_block.parent_root(), - "the blocks form a single chain" - ); - assert!( - pre_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has not* undergone the merge transition" - ); - assert!( - !post_transition_block - .message() - .body() - .execution_payload() - .unwrap() - .is_default_with_empty_roots(), - "the block *has* undergone the merge transition" - ); - - // Assert that the transition block was optimistically imported. - // - // Note: we're using the "fallback" check for optimistic status, so if the block was - // pre-finality then we'll just use the optimistic status of the finalized block. - assert!( - rig.harness - .chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block(&post_transition_block_root) - .unwrap(), - "the transition block should be imported optimistically" - ); - - // Get the mock execution layer to respond to `getBlockByHash` requests normally again. 
- mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - rig -} - -#[tokio::test] -async fn optimistic_transition_block_valid_unfinalized() { - let ttd = 42; - let num_blocks = 16_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_valid_finalized() { - let ttd = 42; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(ttd, ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let 
post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - let valid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - valid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should validate fine"); - // now that the transition block has been validated, it should have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert!( - otbs.is_empty(), - "The valid optimistic transition block should have been removed from the database", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), - "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should 
load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_unfinalized_syncing_ee() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 22_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - < post_transition_block.slot(), 
- "the transition block should not be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // No shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It shouldn't be known as invalid yet - assert!(!rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // Make the execution layer respond `None` to all `getBlockByHash` requests to simulate a - // syncing EE. - let mock_execution_layer = rig.harness.mock_execution_layer.as_ref().unwrap(); - mock_execution_layer - .server - .all_get_block_by_hash_requests_return_none(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - // It should still be marked as optimistic. - assert!(rig - .execution_status(post_transition_block_root) - .is_strictly_optimistic()); - - // the optimistic merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The optimistic merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - // Allow the EL to respond to `getBlockByHash`, as if it has finished syncing. 
- mock_execution_layer - .server - .all_get_block_by_hash_requests_return_natural_value(); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .unwrap(); - - // Still no shutdown should've been triggered. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - // It should be marked invalid now - assert!(rig - .execution_status(post_transition_block_root) - .is_invalid()); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - -#[tokio::test] -async fn optimistic_transition_block_invalid_finalized() { - let block_ttd = 42; - let rig_ttd = 1337; - let num_blocks = 130_usize; - let rig = build_optimistic_chain(block_ttd, rig_ttd, num_blocks).await; - - let post_transition_block_root = rig - .harness - .chain - .block_root_at_slot(Slot::new(1), WhenSlotSkipped::None) - .unwrap() - .unwrap(); - let post_transition_block = rig - .harness - .chain - .get_block(&post_transition_block_root) - .await - .unwrap() - .unwrap(); - - assert!( - rig.cached_head() - .finalized_checkpoint() - .epoch - .start_slot(E::slots_per_epoch()) - > post_transition_block.slot(), - "the transition block should be finalized" - ); - - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - - assert_eq!( - otbs.len(), - 1, - "There should be one optimistic transition block" - ); - - let invalid_otb = OptimisticTransitionBlock::from_block(post_transition_block.message()); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); - - 
// No shutdown should've been triggered yet. - assert_eq!(rig.harness.shutdown_reasons(), vec![]); - - validate_optimistic_transition_blocks(&rig.harness.chain, otbs) - .await - .expect("should invalidate merge transition block and shutdown the client"); - - // The beacon chain should have triggered a shutdown. - assert_eq!( - rig.harness.shutdown_reasons(), - vec![ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON - )] - ); - - // the invalid merge transition block should NOT have been removed from the database - let otbs = load_optimistic_transition_blocks(&rig.harness.chain) - .expect("should load optimistic transition block from db"); - assert_eq!( - otbs.len(), - 1, - "The invalid merge transition block should still be in the database", - ); - assert_eq!( - invalid_otb, otbs[0], - "The optimistic transition block stored in the database should be what we expect", - ); -} - /// Helper for running tests where we generate a chain with an invalid head and then a /// `fork_block` to recover it. 
struct InvalidHeadSetup { diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 1a6b444319..119722b693 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -112,19 +112,8 @@ async fn light_client_bootstrap_test() { return; }; - let checkpoint_slot = Slot::new(E::slots_per_epoch() * 6); let db_path = tempdir().unwrap(); - let log = test_logger(); - - let seconds_per_slot = spec.seconds_per_slot; - let store = get_store_generic( - &db_path, - StoreConfig { - slots_per_restore_point: 2 * E::slots_per_epoch(), - ..Default::default() - }, - test_spec::(), - ); + let store = get_store_generic(&db_path, StoreConfig::default(), spec.clone()); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); let num_initial_slots = E::slots_per_epoch() * 7; @@ -142,78 +131,8 @@ async fn light_client_bootstrap_test() { ) .await; - let wss_block_root = harness + let finalized_checkpoint = harness .chain - .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness - .chain - .store - .get_full_block(&wss_block_root) - .unwrap() - .unwrap(); - let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); - let wss_state = store - .get_state(&wss_state_root, Some(checkpoint_slot)) - .unwrap() - .unwrap(); - - let kzg = get_kzg(&spec); - - let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); - - // Initialise a new beacon chain from the finalized checkpoint. - // The slot clock must be set to a time ahead of the checkpoint state. 
- let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(seconds_per_slot), - ); - slot_clock.set_slot(harness.get_current_slot().as_u64()); - - let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) - .store(store.clone()) - .custom_spec(test_spec::().into()) - .task_executor(harness.chain.task_executor.clone()) - .logger(log.clone()) - .weak_subjectivity_state( - wss_state, - wss_block.clone(), - wss_blobs_opt.clone(), - genesis_state, - ) - .unwrap() - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .slot_clock(slot_clock) - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .execution_layer(Some(mock.el)) - .build() - .expect("should build"); - - let current_state = harness.get_current_state(); - - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - - let finalized_checkpoint = beacon_chain .canonical_head .cached_head() .finalized_checkpoint(); @@ -248,19 +167,8 @@ async fn light_client_updates_test() { }; let num_final_blocks = E::slots_per_epoch() * 2; - let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); let db_path = tempdir().unwrap(); - let log = test_logger(); - - let seconds_per_slot = spec.seconds_per_slot; - let store = get_store_generic( - &db_path, - StoreConfig { - slots_per_restore_point: 2 * E::slots_per_epoch(), - ..Default::default() - }, - test_spec::(), - ); + let store = get_store_generic(&db_path, StoreConfig::default(), test_spec::()); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); let num_initial_slots = E::slots_per_epoch() * 
10; @@ -276,33 +184,6 @@ async fn light_client_updates_test() { ) .await; - let wss_block_root = harness - .chain - .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) - .unwrap() - .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness - .chain - .store - .get_full_block(&wss_block_root) - .unwrap() - .unwrap(); - let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); - let wss_state = store - .get_state(&wss_state_root, Some(checkpoint_slot)) - .unwrap() - .unwrap(); - - let kzg = get_kzg(&spec); - - let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); - harness.advance_slot(); harness .extend_chain_with_light_client_data( @@ -312,52 +193,8 @@ async fn light_client_updates_test() { ) .await; - // Initialise a new beacon chain from the finalized checkpoint. - // The slot clock must be set to a time ahead of the checkpoint state. 
- let slot_clock = TestingSlotClock::new( - Slot::new(0), - Duration::from_secs(harness.chain.genesis_time), - Duration::from_secs(seconds_per_slot), - ); - slot_clock.set_slot(harness.get_current_slot().as_u64()); - - let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); - - let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, kzg) - .store(store.clone()) - .custom_spec(test_spec::().into()) - .task_executor(harness.chain.task_executor.clone()) - .logger(log.clone()) - .weak_subjectivity_state( - wss_state, - wss_block.clone(), - wss_blobs_opt.clone(), - genesis_state, - ) - .unwrap() - .store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") - .slot_clock(slot_clock) - .shutdown_sender(shutdown_tx) - .chain_config(ChainConfig::default()) - .event_handler(Some(ServerSentEventHandler::new_with_capacity( - log.clone(), - 1, - ))) - .execution_layer(Some(mock.el)) - .build() - .expect("should build"); - - let beacon_chain = Arc::new(beacon_chain); - let current_state = harness.get_current_state(); - if ForkName::Electra == current_state.fork_name_unchecked() { - // TODO(electra) fix beacon state `compute_merkle_proof` - return; - } - // calculate the sync period from the previous slot let sync_period = (current_state.slot() - Slot::new(1)) .epoch(E::slots_per_epoch()) @@ -366,7 +203,8 @@ async fn light_client_updates_test() { // fetch a range of light client updates. right now there should only be one light client update // in the db. 
- let lc_updates = beacon_chain + let lc_updates = harness + .chain .get_light_client_updates(sync_period, 100) .unwrap(); @@ -386,7 +224,8 @@ async fn light_client_updates_test() { .await; // we should now have two light client updates in the db - let lc_updates = beacon_chain + let lc_updates = harness + .chain .get_light_client_updates(sync_period, 100) .unwrap(); diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 554010be07..9273137bf6 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -16,7 +16,7 @@ task_executor = { workspace = true } slot_clock = { workspace = true } lighthouse_network = { workspace = true } types = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } num_cpus = { workspace = true } serde = { workspace = true } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 02c287b68e..2a69b04c91 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -135,6 +135,7 @@ pub struct BeaconProcessorQueueLengths { lc_bootstrap_queue: usize, lc_optimistic_update_queue: usize, lc_finality_update_queue: usize, + lc_update_range_queue: usize, api_request_p0_queue: usize, api_request_p1_queue: usize, } @@ -202,6 +203,7 @@ impl BeaconProcessorQueueLengths { lc_bootstrap_queue: 1024, lc_optimistic_update_queue: 512, lc_finality_update_queue: 512, + lc_update_range_queue: 512, api_request_p0_queue: 1024, api_request_p1_queue: 1024, }) @@ -622,6 +624,7 @@ pub enum Work { LightClientBootstrapRequest(BlockingFn), LightClientOptimisticUpdateRequest(BlockingFn), LightClientFinalityUpdateRequest(BlockingFn), + LightClientUpdatesByRangeRequest(BlockingFn), ApiRequestP0(BlockingOrAsync), ApiRequestP1(BlockingOrAsync), } @@ -673,6 +676,7 @@ pub enum WorkType { LightClientBootstrapRequest, 
LightClientOptimisticUpdateRequest, LightClientFinalityUpdateRequest, + LightClientUpdatesByRangeRequest, ApiRequestP0, ApiRequestP1, } @@ -723,6 +727,7 @@ impl Work { WorkType::LightClientOptimisticUpdateRequest } Work::LightClientFinalityUpdateRequest(_) => WorkType::LightClientFinalityUpdateRequest, + Work::LightClientUpdatesByRangeRequest(_) => WorkType::LightClientUpdatesByRangeRequest, Work::UnknownBlockAttestation { .. } => WorkType::UnknownBlockAttestation, Work::UnknownBlockAggregate { .. } => WorkType::UnknownBlockAggregate, Work::UnknownBlockSamplingRequest { .. } => WorkType::UnknownBlockSamplingRequest, @@ -902,6 +907,7 @@ impl BeaconProcessor { let mut lc_optimistic_update_queue = FifoQueue::new(queue_lengths.lc_optimistic_update_queue); let mut lc_finality_update_queue = FifoQueue::new(queue_lengths.lc_finality_update_queue); + let mut lc_update_range_queue = FifoQueue::new(queue_lengths.lc_update_range_queue); let mut api_request_p0_queue = FifoQueue::new(queue_lengths.api_request_p0_queue); let mut api_request_p1_queue = FifoQueue::new(queue_lengths.api_request_p1_queue); @@ -1379,6 +1385,9 @@ impl BeaconProcessor { Work::LightClientFinalityUpdateRequest { .. } => { lc_finality_update_queue.push(work, work_id, &self.log) } + Work::LightClientUpdatesByRangeRequest { .. } => { + lc_update_range_queue.push(work, work_id, &self.log) + } Work::UnknownBlockAttestation { .. 
} => { unknown_block_attestation_queue.push(work) } @@ -1470,6 +1479,7 @@ impl BeaconProcessor { WorkType::LightClientFinalityUpdateRequest => { lc_finality_update_queue.len() } + WorkType::LightClientUpdatesByRangeRequest => lc_update_range_queue.len(), WorkType::ApiRequestP0 => api_request_p0_queue.len(), WorkType::ApiRequestP1 => api_request_p1_queue.len(), }; @@ -1622,7 +1632,8 @@ impl BeaconProcessor { | Work::GossipBlsToExecutionChange(process_fn) | Work::LightClientBootstrapRequest(process_fn) | Work::LightClientOptimisticUpdateRequest(process_fn) - | Work::LightClientFinalityUpdateRequest(process_fn) => { + | Work::LightClientFinalityUpdateRequest(process_fn) + | Work::LightClientUpdatesByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } }; diff --git a/beacon_node/beacon_processor/src/metrics.rs b/beacon_node/beacon_processor/src/metrics.rs index 0a7bdba18d..fc8c712f4e 100644 --- a/beacon_node/beacon_processor/src/metrics.rs +++ b/beacon_node/beacon_processor/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 06f7763c8a..21a6e42cc5 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -33,7 +33,7 @@ sensitive_url = { workspace = true } genesis = { workspace = true } task_executor = { workspace = true } environment = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } time = "0.3.5" directory = { workspace = true } http_api = { workspace = true } diff --git a/beacon_node/client/src/metrics.rs b/beacon_node/client/src/metrics.rs index ebc4fe70a7..e5c07baddc 100644 --- a/beacon_node/client/src/metrics.rs +++ b/beacon_node/client/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static SYNC_SLOTS_PER_SECOND: LazyLock> = LazyLock::new(|| { diff --git 
a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 4910cfd2e1..50400a77e0 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -25,7 +25,7 @@ logging = { workspace = true } superstruct = { workspace = true } tokio = { workspace = true } state_processing = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } task_executor = { workspace = true } eth2 = { workspace = true } sensitive_url = { workspace = true } diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs index 9a11e7a692..1df4ba0df9 100644 --- a/beacon_node/eth1/src/metrics.rs +++ b/beacon_node/eth1/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 843a7b83cb..0ef101fae7 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -35,7 +35,7 @@ slot_clock = { workspace = true } tempfile = { workspace = true } rand = { workspace = true } zeroize = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } ethers-core = { workspace = true } builder_client = { path = "../builder_client" } fork_choice = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index ab275e8b11..1c23c8ba66 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -570,6 +570,7 @@ pub enum ClientCode { Lodestar, Nethermind, Nimbus, + TrinExecution, Teku, Prysm, Reth, @@ -588,6 +589,7 @@ impl std::fmt::Display for ClientCode { ClientCode::Lodestar => "LS", ClientCode::Nethermind => "NM", ClientCode::Nimbus => "NB", + ClientCode::TrinExecution => "TE", ClientCode::Teku => "TK", ClientCode::Prysm => "PM", ClientCode::Reth => "RH", @@ -611,6 +613,7 @@ impl TryFrom for 
ClientCode { "LS" => Ok(Self::Lodestar), "NM" => Ok(Self::Nethermind), "NB" => Ok(Self::Nimbus), + "TE" => Ok(Self::TrinExecution), "TK" => Ok(Self::Teku), "PM" => Ok(Self::Prysm), "RH" => Ok(Self::Reth), diff --git a/beacon_node/execution_layer/src/metrics.rs b/beacon_node/execution_layer/src/metrics.rs index 184031af4d..ab1a22677f 100644 --- a/beacon_node/execution_layer/src/metrics.rs +++ b/beacon_node/execution_layer/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub const HIT: &str = "hit"; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index f3779f0e4a..638fe0f219 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -20,7 +20,7 @@ lighthouse_network = { workspace = true } eth1 = { workspace = true } state_processing = { workspace = true } lighthouse_version = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } warp_utils = { workspace = true } slot_clock = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ffcfda4680..307584b82d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -146,7 +146,6 @@ pub struct Config { pub listen_port: u16, pub allow_origin: Option, pub tls_config: Option, - pub spec_fork_name: Option, pub data_dir: PathBuf, pub sse_capacity_multiplier: usize, pub enable_beacon_processor: bool, @@ -164,7 +163,6 @@ impl Default for Config { listen_port: 5052, allow_origin: None, tls_config: None, - spec_fork_name: None, data_dir: PathBuf::from(DEFAULT_ROOT_DIR), sse_capacity_multiplier: 1, enable_beacon_processor: true, @@ -2643,7 +2641,6 @@ pub fn serve( ); // GET config/spec - let spec_fork_name = ctx.config.spec_fork_name; let get_config_spec = config_path .and(warp::path("spec")) .and(warp::path::end()) @@ -2653,7 +2650,7 @@ pub fn serve( move 
|task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { let config_and_preset = - ConfigAndPreset::from_chain_spec::(&chain.spec, spec_fork_name); + ConfigAndPreset::from_chain_spec::(&chain.spec, None); Ok(api_types::GenericResponse::from(config_and_preset)) }) }, diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs index 970eef8dd0..b6a53b26c6 100644 --- a/beacon_node/http_api/src/metrics.rs +++ b/beacon_node/http_api/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static HTTP_API_PATHS_TOTAL: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index f835d13fb6..97ba72a2ac 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -14,7 +14,7 @@ beacon_chain = { workspace = true } store = { workspace = true } lighthouse_network = { workspace = true } slot_clock = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } lighthouse_version = { workspace = true } warp_utils = { workspace = true } malloc_utils = { workspace = true } diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index d68efff432..d751c51e4c 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -1,8 +1,8 @@ use crate::Context; use beacon_chain::BeaconChainTypes; -use lighthouse_metrics::TextEncoder; use lighthouse_network::prometheus_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; +use metrics::TextEncoder; pub fn gather_prometheus_metrics( ctx: &Context, @@ -17,13 +17,13 @@ pub fn gather_prometheus_metrics( // - Statically updated: things which are only updated at the time of the scrape (used where we // can avoid cluttering up code with metrics calls). 
// - // The `lighthouse_metrics` crate has a `DEFAULT_REGISTRY` global singleton + // The `metrics` crate has a `DEFAULT_REGISTRY` global singleton // which keeps the state of all the metrics. Dynamically updated things will already be // up-to-date in the registry (because they update themselves) however statically updated // things need to be "scraped". // // We proceed by, first updating all the static metrics using `scrape_for_metrics(..)`. Then, - // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into + // using `metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into // a string that can be returned via HTTP. if let Some(beacon_chain) = ctx.chain.as_ref() { @@ -48,7 +48,7 @@ pub fn gather_prometheus_metrics( } encoder - .encode_utf8(&lighthouse_metrics::gather(), &mut buffer) + .encode_utf8(&metrics::gather(), &mut buffer) .unwrap(); // encode gossipsub metrics also if they exist if let Some(registry) = ctx.gossipsub_registry.as_ref() { diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index b0f5b9a5e1..c4fad99702 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -21,7 +21,7 @@ futures = { workspace = true } error-chain = { workspace = true } dirs = { workspace = true } fnv = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } smallvec = { workspace = true } tokio-io-timeout = "1" lru = { workspace = true } diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index bf77f30979..c50e76e7f2 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -1812,9 +1812,6 @@ where // Calculate the message id on the transformed data. 
let msg_id = self.config.message_id(&message); - // Broadcast IDONTWANT messages. - self.send_idontwant(&raw_message, &msg_id, propagation_source); - // Check the validity of the message // Peers get penalized if this message is invalid. We don't add it to the duplicate cache // and instead continually penalize peers that repeatedly send this message. @@ -1830,6 +1827,12 @@ where self.mcache.observe_duplicate(&msg_id, propagation_source); return; } + + // Broadcast IDONTWANT messages + if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() { + self.send_idontwant(&raw_message, &msg_id, propagation_source); + } + tracing::debug!( message=%msg_id, "Put message in duplicate_cache and resolve promises" diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs index 00de3ba2db..62f026b568 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour/tests.rs @@ -5266,13 +5266,14 @@ fn sends_idontwant() { let message = RawMessage { source: Some(peers[1]), - data: vec![12], + data: vec![12u8; 1024], sequence_number: Some(0), topic: topic_hashes[0].clone(), signature: None, key: None, validated: true, }; + gs.handle_received_message(message.clone(), &local_id); assert_eq!( receivers @@ -5292,6 +5293,48 @@ fn sends_idontwant() { ); } +#[test] +fn doesnt_sends_idontwant_for_lower_message_size() { + let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + .peer_no(5) + .topics(vec![String::from("topic1")]) + .to_subscribe(true) + .gs_config(Config::default()) + .explicit(1) + .peer_kind(PeerKind::Gossipsubv1_2) + .create_network(); + + let local_id = PeerId::random(); + + let message = RawMessage { + source: Some(peers[1]), + data: vec![12], + sequence_number: Some(0), + topic: topic_hashes[0].clone(), + signature: None, + key: None, + validated: true, + }; + + 
gs.handle_received_message(message.clone(), &local_id); + assert_eq!( + receivers + .into_iter() + .fold(0, |mut idontwants, (peer_id, c)| { + let non_priority = c.non_priority.into_inner(); + while !non_priority.is_empty() { + if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() { + assert_ne!(peer_id, peers[1]); + idontwants += 1; + } + } + idontwants + }), + 0, + "IDONTWANT was sent" + ); +} + /// Test that a node doesn't send IDONTWANT messages to the mesh peers /// that don't run Gossipsub v1.2. #[test] @@ -5316,6 +5359,7 @@ fn doesnt_send_idontwant() { key: None, validated: true, }; + gs.handle_received_message(message.clone(), &local_id); assert_eq!( receivers diff --git a/beacon_node/lighthouse_network/gossipsub/src/config.rs b/beacon_node/lighthouse_network/gossipsub/src/config.rs index 1296e614c8..eb8dd432a3 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/config.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/config.rs @@ -98,6 +98,7 @@ pub struct Config { connection_handler_queue_len: usize, connection_handler_publish_duration: Duration, connection_handler_forward_duration: Duration, + idontwant_message_size_threshold: usize, } impl Config { @@ -370,6 +371,16 @@ impl Config { pub fn forward_queue_duration(&self) -> Duration { self.connection_handler_forward_duration } + + // The message size threshold for which IDONTWANT messages are sent. + // Sending IDONTWANT messages for small messages can have a negative effect to the overall + // traffic and CPU load. This acts as a lower bound cutoff for the message size to which + // IDONTWANT won't be sent to peers. 
Only works if the peers support Gossipsub1.2 + // (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md#idontwant-message) + // default is 1kB + pub fn idontwant_message_size_threshold(&self) -> usize { + self.idontwant_message_size_threshold + } } impl Default for Config { @@ -440,6 +451,7 @@ impl Default for ConfigBuilder { connection_handler_queue_len: 5000, connection_handler_publish_duration: Duration::from_secs(5), connection_handler_forward_duration: Duration::from_millis(1000), + idontwant_message_size_threshold: 1000, }, invalid_protocol: false, } @@ -825,6 +837,17 @@ impl ConfigBuilder { self } + // The message size threshold for which IDONTWANT messages are sent. + // Sending IDONTWANT messages for small messages can have a negative effect to the overall + // traffic and CPU load. This acts as a lower bound cutoff for the message size to which + // IDONTWANT won't be sent to peers. Only works if the peers support Gossipsub1.2 + // (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md#idontwant-message) + // default is 1kB + pub fn idontwant_message_size_threshold(&mut self, size: usize) -> &mut Self { + self.config.idontwant_message_size_threshold = size; + self + } + /// Constructs a [`Config`] from the given configuration and validates the settings. 
pub fn build(&self) -> Result { // check all constraints on config @@ -895,6 +918,10 @@ impl std::fmt::Debug for Config { "published_message_ids_cache_time", &self.published_message_ids_cache_time, ); + let _ = builder.field( + "idontwant_message_size_threhold", + &self.idontwant_message_size_threshold, + ); builder.finish() } } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index ea29501784..d70e50b1da 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -19,6 +19,7 @@ pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED; pub const DEFAULT_TCP_PORT: u16 = 9000u16; pub const DEFAULT_DISC_PORT: u16 = 9000u16; pub const DEFAULT_QUIC_PORT: u16 = 9001u16; +pub const DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD: usize = 1000usize; /// The maximum size of gossip messages. pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize { @@ -141,6 +142,10 @@ pub struct Config { /// Configuration for the inbound rate limiter (requests received by this node). pub inbound_rate_limiter_config: Option, + + /// Configuration for the minimum message size for which IDONTWANT messages are send in the mesh. + /// Lower the value reduces the optimization effect of the IDONTWANT messages. 
+ pub idontwant_message_size_threshold: usize, } impl Config { @@ -352,6 +357,7 @@ impl Default for Config { outbound_rate_limiter_config: None, invalid_block_storage: None, inbound_rate_limiter_config: None, + idontwant_message_size_threshold: DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD, } } } @@ -433,6 +439,7 @@ pub fn gossipsub_config( gossipsub_config_params: GossipsubConfigParams, seconds_per_slot: u64, slots_per_epoch: u64, + idontwant_message_size_threshold: usize, ) -> gossipsub::Config { fn prefix( prefix: [u8; 4], @@ -498,6 +505,7 @@ pub fn gossipsub_config( .duplicate_cache_time(duplicate_cache_time) .message_id_fn(gossip_message_id) .allow_self_origin(true) + .idontwant_message_size_threshold(idontwant_message_size_threshold) .build() .expect("valid gossipsub configuration") } diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index c3f64a5a1f..15445c7d64 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static NAT_OPEN: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index ec4d892c9b..c1e72d250f 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -558,6 +558,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::BlobsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, Protocol::DataColumnsByRange => PeerAction::MidToleranceError, @@ -585,6 +586,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate 
=> return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::MetaData => PeerAction::Fatal, Protocol::Status => PeerAction::Fatal, } @@ -606,6 +608,7 @@ impl PeerManager { Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, + Protocol::LightClientUpdatesByRange => return, Protocol::Goodbye => return, Protocol::MetaData => return, Protocol::Status => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 13af04f9b8..9bdecab70b 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -18,9 +18,9 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, - RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, - SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, - SignedBeaconBlockElectra, + LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, }; use unsigned_varint::codec::Uvi; @@ -76,6 +76,7 @@ impl SSZSnappyInboundCodec { RpcSuccessResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RpcSuccessResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), + RpcSuccessResponse::LightClientUpdatesByRange(res) => res.as_ssz_bytes(), RpcSuccessResponse::Pong(res) => res.data.as_ssz_bytes(), RpcSuccessResponse::MetaData(res) => // Encode the correct version of the MetaData response based on the negotiated version. 
@@ -342,6 +343,7 @@ impl Encoder> for SSZSnappyOutboundCodec { RequestType::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), RequestType::Ping(req) => req.as_ssz_bytes(), RequestType::LightClientBootstrap(req) => req.as_ssz_bytes(), + RequestType::LightClientUpdatesByRange(req) => req.as_ssz_bytes(), // no metadata to encode RequestType::MetaData(_) | RequestType::LightClientOptimisticUpdate @@ -503,6 +505,10 @@ fn context_bytes( return lc_finality_update .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); } + RpcSuccessResponse::LightClientUpdatesByRange(lc_update) => { + return lc_update + .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); + } // These will not pass the has_context_bytes() check RpcSuccessResponse::Status(_) | RpcSuccessResponse::Pong(_) @@ -613,6 +619,11 @@ fn handle_rpc_request( SupportedProtocol::LightClientFinalityUpdateV1 => { Ok(Some(RequestType::LightClientFinalityUpdate)) } + SupportedProtocol::LightClientUpdatesByRangeV1 => { + Ok(Some(RequestType::LightClientUpdatesByRange( + LightClientUpdatesByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))) + } // MetaData requests return early from InboundUpgrade and do not reach the decoder. // Handle this case just for completeness. 
SupportedProtocol::MetaDataV3 => { @@ -795,6 +806,21 @@ fn handle_rpc_response( ), )), }, + SupportedProtocol::LightClientUpdatesByRangeV1 => match fork_name { + Some(fork_name) => Ok(Some(RpcSuccessResponse::LightClientUpdatesByRange( + Arc::new(LightClientUpdate::from_ssz_bytes( + decoded_buffer, + &fork_name, + )?), + ))), + None => Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, // MetaData V2/V3 responses have no context bytes, so behave similarly to V1 responses SupportedProtocol::MetaDataV3 => Ok(Some(RpcSuccessResponse::MetaData(MetaData::V3( MetaDataV3::from_ssz_bytes(decoded_buffer)?, @@ -1214,6 +1240,12 @@ mod tests { ) } RequestType::LightClientOptimisticUpdate | RequestType::LightClientFinalityUpdate => {} + RequestType::LightClientUpdatesByRange(light_client_updates_by_range) => { + assert_eq!( + decoded, + RequestType::LightClientUpdatesByRange(light_client_updates_by_range) + ) + } } } diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index fcb9c98604..42ece6dc4f 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -96,6 +96,7 @@ pub struct RateLimiterConfig { pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, + pub(super) light_client_updates_by_range_quota: Quota, } impl RateLimiterConfig { @@ -121,6 +122,7 @@ impl RateLimiterConfig { pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); + pub const DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA: Quota = Quota::one_every(10); } impl Default for RateLimiterConfig { 
@@ -140,6 +142,7 @@ impl Default for RateLimiterConfig { light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, light_client_finality_update_quota: Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA, + light_client_updates_by_range_quota: Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA, } } } @@ -198,6 +201,7 @@ impl FromStr for RateLimiterConfig { let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; + let mut light_client_updates_by_range_quota = None; for proto_def in s.split(';') { let ProtocolQuota { protocol, quota } = proto_def.parse()?; @@ -228,6 +232,10 @@ impl FromStr for RateLimiterConfig { light_client_finality_update_quota = light_client_finality_update_quota.or(quota) } + Protocol::LightClientUpdatesByRange => { + light_client_updates_by_range_quota = + light_client_updates_by_range_quota.or(quota) + } } } Ok(RateLimiterConfig { @@ -252,6 +260,8 @@ impl FromStr for RateLimiterConfig { .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA), light_client_finality_update_quota: light_client_finality_update_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA), + light_client_updates_by_range_quota: light_client_updates_by_range_quota + .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_UPDATES_BY_RANGE_QUOTA), }) } } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index dc7d316fb0..bb8bfb0e20 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -14,10 +14,11 @@ use std::sync::Arc; use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; +use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, Hash256, 
LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, + LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; /// Maximum length of error message. @@ -477,6 +478,34 @@ impl DataColumnsByRootRequest { } } +/// Request a number of beacon data columns from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct LightClientUpdatesByRangeRequest { + /// The starting period to request light client updates. + pub start_period: u64, + /// The number of periods from `start_period`. + pub count: u64, +} + +impl LightClientUpdatesByRangeRequest { + pub fn max_requested(&self) -> u64 { + MAX_REQUEST_LIGHT_CLIENT_UPDATES + } + + pub fn ssz_min_len() -> usize { + LightClientUpdatesByRangeRequest { + start_period: 0, + count: 0, + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len() -> usize { + Self::ssz_min_len() + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -504,6 +533,9 @@ pub enum RpcSuccessResponse { /// A response to a get LIGHT_CLIENT_FINALITY_UPDATE request. LightClientFinalityUpdate(Arc>), + /// A response to a get LIGHT_CLIENT_UPDATES_BY_RANGE request. + LightClientUpdatesByRange(Arc>), + /// A response to a get BLOBS_BY_ROOT request. BlobsByRoot(Arc>), @@ -540,6 +572,9 @@ pub enum ResponseTermination { /// Data column sidecars by range stream termination. DataColumnsByRange, + + /// Light client updates by range stream termination. 
+ LightClientUpdatesByRange, } /// The structured response containing a result/code indicating success or failure @@ -639,6 +674,7 @@ impl RpcSuccessResponse { Protocol::LightClientOptimisticUpdate } RpcSuccessResponse::LightClientFinalityUpdate(_) => Protocol::LightClientFinalityUpdate, + RpcSuccessResponse::LightClientUpdatesByRange(_) => Protocol::LightClientUpdatesByRange, } } } @@ -710,6 +746,13 @@ impl std::fmt::Display for RpcSuccessResponse { update.signature_slot() ) } + RpcSuccessResponse::LightClientUpdatesByRange(update) => { + write!( + f, + "LightClientUpdatesByRange Slot: {}", + update.signature_slot(), + ) + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index e3b41ea1df..7d091da766 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -553,6 +553,9 @@ where ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, + ResponseTermination::LightClientUpdatesByRange => { + Protocol::LightClientUpdatesByRange + } }, ), }; diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 67104fbc29..d0dbffe932 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -21,7 +21,8 @@ use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, - LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, SignedBeaconBlock, + LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, Signature, + SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` 
as min/max values is @@ -143,6 +144,13 @@ pub static LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX: LazyLock = LazyLock::new(| LightClientBootstrap::::ssz_max_len_for_fork(ForkName::Electra) }); +pub static LIGHT_CLIENT_UPDATES_BY_RANGE_CAPELLA_MAX: LazyLock = + LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Capella)); +pub static LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX: LazyLock = + LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Deneb)); +pub static LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX: LazyLock = + LazyLock::new(|| LightClientUpdate::::ssz_max_len_for_fork(ForkName::Electra)); + /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// The number of seconds to wait for the first bytes of a request once a protocol has been @@ -190,6 +198,26 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { } } +fn rpc_light_client_updates_by_range_limits_by_fork(current_fork: ForkName) -> RpcLimits { + let altair_fixed_len = LightClientFinalityUpdateAltair::::ssz_fixed_len(); + + match ¤t_fork { + ForkName::Base => RpcLimits::new(0, 0), + ForkName::Altair | ForkName::Bellatrix => { + RpcLimits::new(altair_fixed_len, altair_fixed_len) + } + ForkName::Capella => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_CAPELLA_MAX) + } + ForkName::Deneb => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX) + } + ForkName::Electra => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX) + } + } +} + fn rpc_light_client_finality_update_limits_by_fork(current_fork: ForkName) -> RpcLimits { let altair_fixed_len = LightClientFinalityUpdateAltair::::ssz_fixed_len(); @@ -286,6 +314,9 @@ pub enum Protocol { /// The `LightClientFinalityUpdate` protocol name. 
#[strum(serialize = "light_client_finality_update")] LightClientFinalityUpdate, + /// The `LightClientUpdatesByRange` protocol name + #[strum(serialize = "light_client_updates_by_range")] + LightClientUpdatesByRange, } impl Protocol { @@ -304,6 +335,7 @@ impl Protocol { Protocol::LightClientBootstrap => None, Protocol::LightClientOptimisticUpdate => None, Protocol::LightClientFinalityUpdate => None, + Protocol::LightClientUpdatesByRange => None, } } } @@ -334,6 +366,7 @@ pub enum SupportedProtocol { LightClientBootstrapV1, LightClientOptimisticUpdateV1, LightClientFinalityUpdateV1, + LightClientUpdatesByRangeV1, } impl SupportedProtocol { @@ -356,6 +389,7 @@ impl SupportedProtocol { SupportedProtocol::LightClientBootstrapV1 => "1", SupportedProtocol::LightClientOptimisticUpdateV1 => "1", SupportedProtocol::LightClientFinalityUpdateV1 => "1", + SupportedProtocol::LightClientUpdatesByRangeV1 => "1", } } @@ -380,6 +414,7 @@ impl SupportedProtocol { Protocol::LightClientOptimisticUpdate } SupportedProtocol::LightClientFinalityUpdateV1 => Protocol::LightClientFinalityUpdate, + SupportedProtocol::LightClientUpdatesByRangeV1 => Protocol::LightClientUpdatesByRange, } } @@ -542,6 +577,10 @@ impl ProtocolId { ), Protocol::LightClientOptimisticUpdate => RpcLimits::new(0, 0), Protocol::LightClientFinalityUpdate => RpcLimits::new(0, 0), + Protocol::LightClientUpdatesByRange => RpcLimits::new( + LightClientUpdatesByRangeRequest::ssz_min_len(), + LightClientUpdatesByRangeRequest::ssz_max_len(), + ), Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty } } @@ -577,6 +616,9 @@ impl ProtocolId { Protocol::LightClientFinalityUpdate => { rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork()) } + Protocol::LightClientUpdatesByRange => { + rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork()) + } } } @@ -592,7 +634,8 @@ impl ProtocolId { | SupportedProtocol::DataColumnsByRangeV1 | 
SupportedProtocol::LightClientBootstrapV1 | SupportedProtocol::LightClientOptimisticUpdateV1 - | SupportedProtocol::LightClientFinalityUpdateV1 => true, + | SupportedProtocol::LightClientFinalityUpdateV1 + | SupportedProtocol::LightClientUpdatesByRangeV1 => true, SupportedProtocol::StatusV1 | SupportedProtocol::BlocksByRootV1 | SupportedProtocol::BlocksByRangeV1 @@ -723,6 +766,7 @@ pub enum RequestType { LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, + LightClientUpdatesByRange(LightClientUpdatesByRangeRequest), Ping(Ping), MetaData(MetadataRequest), } @@ -747,6 +791,7 @@ impl RequestType { RequestType::LightClientBootstrap(_) => 1, RequestType::LightClientOptimisticUpdate => 1, RequestType::LightClientFinalityUpdate => 1, + RequestType::LightClientUpdatesByRange(req) => req.count, } } @@ -780,6 +825,9 @@ impl RequestType { RequestType::LightClientFinalityUpdate => { SupportedProtocol::LightClientFinalityUpdateV1 } + RequestType::LightClientUpdatesByRange(_) => { + SupportedProtocol::LightClientUpdatesByRangeV1 + } } } @@ -802,6 +850,7 @@ impl RequestType { RequestType::LightClientBootstrap(_) => unreachable!(), RequestType::LightClientFinalityUpdate => unreachable!(), RequestType::LightClientOptimisticUpdate => unreachable!(), + RequestType::LightClientUpdatesByRange(_) => unreachable!(), } } @@ -861,6 +910,10 @@ impl RequestType { SupportedProtocol::LightClientFinalityUpdateV1, Encoding::SSZSnappy, )], + RequestType::LightClientUpdatesByRange(_) => vec![ProtocolId::new( + SupportedProtocol::LightClientUpdatesByRangeV1, + Encoding::SSZSnappy, + )], } } @@ -879,6 +932,7 @@ impl RequestType { RequestType::LightClientBootstrap(_) => true, RequestType::LightClientOptimisticUpdate => true, RequestType::LightClientFinalityUpdate => true, + RequestType::LightClientUpdatesByRange(_) => true, } } } @@ -997,6 +1051,9 @@ impl std::fmt::Display for RequestType { RequestType::LightClientFinalityUpdate => { write!(f, 
"Light client finality update request") } + RequestType::LightClientUpdatesByRange(_) => { + write!(f, "Light client updates by range request") + } } } } diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index a8e8f45b6f..ecbacc8c11 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -107,6 +107,8 @@ pub struct RPCRateLimiter { lc_optimistic_update_rl: Limiter, /// LightClientFinalityUpdate rate limiter. lc_finality_update_rl: Limiter, + /// LightClientUpdatesByRange rate limiter. + lc_updates_by_range_rl: Limiter, } /// Error type for non conformant requests @@ -147,6 +149,8 @@ pub struct RPCRateLimiterBuilder { lc_optimistic_update_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. lc_finality_update_quota: Option, + /// Quota for the LightClientUpdatesByRange protocol. + lc_updates_by_range_quota: Option, } impl RPCRateLimiterBuilder { @@ -167,6 +171,7 @@ impl RPCRateLimiterBuilder { Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, + Protocol::LightClientUpdatesByRange => self.lc_updates_by_range_quota = q, } self } @@ -192,6 +197,9 @@ impl RPCRateLimiterBuilder { let lc_finality_update_quota = self .lc_finality_update_quota .ok_or("LightClientFinalityUpdate quota not specified")?; + let lc_updates_by_range_quota = self + .lc_updates_by_range_quota + .ok_or("LightClientUpdatesByRange quota not specified")?; let blbrange_quota = self .blbrange_quota @@ -222,6 +230,7 @@ impl RPCRateLimiterBuilder { let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; + let 
lc_updates_by_range_rl = Limiter::from_quota(lc_updates_by_range_quota)?; // check for peers to prune every 30 seconds, starting in 30 seconds let prune_every = tokio::time::Duration::from_secs(30); @@ -242,6 +251,7 @@ impl RPCRateLimiterBuilder { lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, + lc_updates_by_range_rl, init_time: Instant::now(), }) } @@ -279,6 +289,7 @@ impl RPCRateLimiter { light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, + light_client_updates_by_range_quota, } = config; Self::builder() @@ -301,6 +312,10 @@ impl RPCRateLimiter { Protocol::LightClientFinalityUpdate, light_client_finality_update_quota, ) + .set_quota( + Protocol::LightClientUpdatesByRange, + light_client_updates_by_range_quota, + ) .build() } @@ -333,6 +348,7 @@ impl RPCRateLimiter { Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, + Protocol::LightClientUpdatesByRange => &mut self.lc_updates_by_range_rl, }; check(limiter) } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index e57e846c33..cb22815390 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use libp2p::swarm::ConnectionId; use types::{ BlobSidecar, DataColumnSidecar, EthSpec, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, SignedBeaconBlock, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; use crate::rpc::{ @@ -22,11 +22,6 @@ pub struct SingleLookupReqId { pub req_id: Id, } -/// Request ID for data_columns_by_root requests. Block lookup do not issue this requests directly. 
-/// Wrapping this particular req_id, ensures not mixing this requests with a custody req_id. -#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] -pub struct DataColumnsByRootRequestId(pub Id); - /// Id of rpc requests sent by sync to the network. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum SyncRequestId { @@ -35,11 +30,19 @@ pub enum SyncRequestId { /// Request searching for a set of blobs given a hash. SingleBlob { id: SingleLookupReqId }, /// Request searching for a set of data columns given a hash and list of column indices. - DataColumnsByRoot(DataColumnsByRootRequestId, DataColumnsByRootRequester), + DataColumnsByRoot(DataColumnsByRootRequestId), /// Range request that is composed by both a block range request and a blob range request. RangeBlockAndBlobs { id: Id }, } +/// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. +/// Wrapping this particular req_id, ensures not mixing this request with a custody req_id. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct DataColumnsByRootRequestId { + pub id: Id, + pub requester: DataColumnsByRootRequester, +} + #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum DataColumnsByRootRequester { Sampling(SamplingId), @@ -114,6 +117,8 @@ pub enum Response { LightClientOptimisticUpdate(Arc>), /// A response to a LightClientFinalityUpdate request. LightClientFinalityUpdate(Arc>), + /// A response to a LightClientUpdatesByRange request. 
+ LightClientUpdatesByRange(Option>>), } impl std::convert::From> for RpcResponse { @@ -153,6 +158,12 @@ impl std::convert::From> for RpcResponse { Response::LightClientFinalityUpdate(f) => { RpcResponse::Success(RpcSuccessResponse::LightClientFinalityUpdate(f)) } + Response::LightClientUpdatesByRange(f) => match f { + Some(d) => RpcResponse::Success(RpcSuccessResponse::LightClientUpdatesByRange(d)), + None => { + RpcResponse::StreamTermination(ResponseTermination::LightClientUpdatesByRange) + } + }, } } } @@ -173,8 +184,9 @@ impl slog::Value for RequestId { } } +// This custom impl reduces log boilerplate not printing `DataColumnsByRootRequestId` on each id log impl std::fmt::Display for DataColumnsByRootRequestId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) + write!(f, "{} {:?}", self.id, self.requester) } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ea4c3acb42..f3fbd25a90 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -237,6 +237,7 @@ impl Network { gossipsub_config_params, ctx.chain_spec.seconds_per_slot, E::slots_per_epoch(), + config.idontwant_message_size_threshold, ); let score_settings = PeerScoreSettings::new(&ctx.chain_spec, gs_config.mesh_n()); @@ -1578,6 +1579,17 @@ impl Network { request, }) } + RequestType::LightClientUpdatesByRange(_) => { + metrics::inc_counter_vec( + &metrics::TOTAL_RPC_REQUESTS, + &["light_client_updates_by_range"], + ); + Some(NetworkEvent::RequestReceived { + peer_id, + id: (connection_id, request.substream_id), + request, + }) + } } } Ok(RPCReceived::Response(id, resp)) => { @@ -1631,6 +1643,11 @@ impl Network { peer_id, Response::LightClientFinalityUpdate(update), ), + RpcSuccessResponse::LightClientUpdatesByRange(update) => self.build_response( + id, + peer_id, + Response::LightClientUpdatesByRange(Some(update)), + ), } } 
Ok(RPCReceived::EndOfStream(id, termination)) => { @@ -1641,6 +1658,9 @@ impl Network { ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), + ResponseTermination::LightClientUpdatesByRange => { + Response::LightClientUpdatesByRange(None) + } }; self.build_response(id, peer_id, response) } @@ -1877,16 +1897,7 @@ impl Network { } } SwarmEvent::ListenerError { error, .. } => { - // Ignore quic accept and close errors. - if let Some(error) = error - .get_ref() - .and_then(|err| err.downcast_ref::()) - .filter(|err| matches!(err, libp2p::quic::Error::Connection(_))) - { - debug!(self.log, "Listener closed quic connection"; "reason" => ?error); - } else { - warn!(self.log, "Listener error"; "error" => ?error); - } + debug!(self.log, "Listener closed connection attempt"; "reason" => ?error); None } _ => { diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 6d61bffe3d..500cd23fae 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -38,7 +38,7 @@ smallvec = { workspace = true } rand = { workspace = true } fnv = { workspace = true } alloy-rlp = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } logging = { workspace = true } task_executor = { workspace = true } igd-next = "0.14" diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 9e42aa8e92..4b7e8a50a3 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -5,11 +5,11 @@ use beacon_chain::{ sync_committee_verification::Error as SyncCommitteeError, }; use fnv::FnvHashMap; -pub use lighthouse_metrics::*; use lighthouse_network::{ peer_manager::peerdb::client::ClientKind, types::GossipKind, GossipTopic, Gossipsub, NetworkGlobals, }; +pub use metrics::*; use std::sync::{Arc, 
LazyLock}; use strum::IntoEnumIterator; use types::EthSpec; diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 3153ce533c..4d875cb4a1 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -4,6 +4,7 @@ use crate::{ service::NetworkMessage, sync::SyncMessage, }; +use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::store::Error; @@ -18,13 +19,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, NotifyExecutionLayer, }; -use beacon_chain::{ - blob_verification::{GossipBlobError, GossipVerifiedBlob}, - data_availability_checker::DataColumnsToPublish, -}; -use lighthouse_network::{ - Client, MessageAcceptance, MessageId, PeerAction, PeerId, PubsubMessage, ReportSource, -}; +use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use operation_pool::ReceivedPreCapella; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; @@ -171,26 +166,6 @@ impl NetworkBeaconProcessor { }) } - pub(crate) fn handle_data_columns_to_publish( - &self, - data_columns_to_publish: DataColumnsToPublish, - ) { - if let Some(data_columns_to_publish) = data_columns_to_publish { - self.send_network_message(NetworkMessage::Publish { - messages: data_columns_to_publish - .iter() - .map(|d| { - let subnet = DataColumnSubnetId::from_column_index::( - d.index as usize, - &self.chain.spec, - ); - PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone()))) - }) - .collect(), - }); - } - } - /// Send a message on `message_tx` that the `message_id` sent by 
`peer_id` should be propagated on /// the gossip network. /// @@ -1022,9 +997,7 @@ impl NetworkBeaconProcessor { .process_gossip_data_columns(vec![verified_data_column], || Ok(())) .await { - Ok((availability, data_columns_to_publish)) => { - self.handle_data_columns_to_publish(data_columns_to_publish); - + Ok(availability) => { match availability { AvailabilityProcessingStatus::Imported(block_root) => { // Note: Reusing block imported metric here @@ -1052,7 +1025,7 @@ impl NetworkBeaconProcessor { "block_root" => %block_root, ); - // Potentially trigger reconstruction + self.attempt_data_column_reconstruction(block_root).await; } } } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 04571e181d..76f5e886ff 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -2,7 +2,9 @@ use crate::sync::manager::BlockProcessType; use crate::sync::SamplingId; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::block_verification_types::RpcBlock; -use beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend, BeaconChain}; +use beacon_chain::{ + builder::Witness, eth1_chain::CachingEth1Backend, AvailabilityProcessingStatus, BeaconChain, +}; use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorChannels, BeaconProcessorSend, @@ -12,13 +14,14 @@ use beacon_processor::{ use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, + LightClientUpdatesByRangeRequest, }; use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, - Client, MessageId, NetworkGlobals, 
PeerId, + Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, }; -use slog::{debug, Logger}; +use slog::{debug, error, trace, Logger}; use slot_clock::ManualSlotClock; use std::path::PathBuf; use std::sync::Arc; @@ -829,6 +832,32 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process a `LightClientUpdatesByRange` request from the RPC network. + pub fn send_light_client_updates_by_range_request( + self: &Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + request: LightClientUpdatesByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = move || { + processor.handle_light_client_updates_by_range( + peer_id, + connection_id, + substream_id, + request_id, + request, + ) + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::LightClientUpdatesByRangeRequest(Box::new(process_fn)), + }) + } + /// Send a message to `sync_tx`. /// /// Creates a log if there is an internal error. @@ -848,6 +877,75 @@ impl NetworkBeaconProcessor { "error" => %e) }); } + + /// Attempt to reconstruct all data columns if the following conditions satisfies: + /// - Our custody requirement is all columns + /// - We have >= 50% of columns, but not all columns + /// + /// Returns `Some(AvailabilityProcessingStatus)` if reconstruction is successfully performed, + /// otherwise returns `None`. 
+ async fn attempt_data_column_reconstruction( + &self, + block_root: Hash256, + ) -> Option { + let result = self.chain.reconstruct_data_columns(block_root).await; + match result { + Ok(Some((availability_processing_status, data_columns_to_publish))) => { + self.send_network_message(NetworkMessage::Publish { + messages: data_columns_to_publish + .iter() + .map(|d| { + let subnet = DataColumnSubnetId::from_column_index::( + d.index as usize, + &self.chain.spec, + ); + PubsubMessage::DataColumnSidecar(Box::new((subnet, d.clone()))) + }) + .collect(), + }); + + match &availability_processing_status { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components available via reconstruction"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Block components still missing block after reconstruction"; + "result" => "imported all custody columns", + "block_hash" => %block_root, + ); + } + } + + Some(availability_processing_status) + } + Ok(None) => { + // reason is tracked via the `KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL` metric + trace!( + self.log, + "Reconstruction not required for block"; + "block_hash" => %block_root, + ); + None + } + Err(e) => { + error!( + self.log, + "Error during data column reconstruction"; + "block_root" => %block_root, + "error" => ?e + ); + None + } + } + } } type TestBeaconChainType = diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 88a7616ec7..6d32806713 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -10,6 +10,7 @@ use lighthouse_network::rpc::methods::{ }; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, 
PeerRequestId, ReportSource, Response, SyncInfo}; +use methods::LightClientUpdatesByRangeRequest; use slog::{debug, error, warn}; use slot_clock::SlotClock; use std::collections::{hash_map::Entry, HashMap}; @@ -428,6 +429,105 @@ impl NetworkBeaconProcessor { Ok(()) } + pub fn handle_light_client_updates_by_range( + self: &Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + request: LightClientUpdatesByRangeRequest, + ) { + self.terminate_response_stream( + peer_id, + connection_id, + substream_id, + request_id, + self.clone() + .handle_light_client_updates_by_range_request_inner( + peer_id, + connection_id, + substream_id, + request_id, + request, + ), + Response::LightClientUpdatesByRange, + ); + } + + /// Handle a `LightClientUpdatesByRange` request from the peer. + pub fn handle_light_client_updates_by_range_request_inner( + self: Arc, + peer_id: PeerId, + connection_id: ConnectionId, + substream_id: SubstreamId, + request_id: RequestId, + req: LightClientUpdatesByRangeRequest, + ) -> Result<(), (RpcErrorResponse, &'static str)> { + debug!(self.log, "Received LightClientUpdatesByRange Request"; + "peer_id" => %peer_id, + "count" => req.count, + "start_period" => req.start_period, + ); + + // Should not send more than max light client updates + let max_request_size: u64 = req.max_requested(); + if req.count > max_request_size { + return Err(( + RpcErrorResponse::InvalidRequest, + "Request exceeded max size", + )); + } + + let lc_updates = match self + .chain + .get_light_client_updates(req.start_period, req.count) + { + Ok(lc_updates) => lc_updates, + Err(e) => { + error!(self.log, "Unable to obtain light client updates"; + "request" => ?req, + "peer" => %peer_id, + "error" => ?e + ); + return Err((RpcErrorResponse::ServerError, "Database error")); + } + }; + + for lc_update in lc_updates.iter() { + self.send_network_message(NetworkMessage::SendResponse { + peer_id, + response: 
Response::LightClientUpdatesByRange(Some(Arc::new(lc_update.clone()))), + request_id, + id: (connection_id, substream_id), + }); + } + + let lc_updates_sent = lc_updates.len(); + + if lc_updates_sent < req.count as usize { + debug!( + self.log, + "LightClientUpdatesByRange outgoing response processed"; + "peer" => %peer_id, + "info" => "Failed to return all requested light client updates. The peer may have requested data ahead of whats currently available", + "start_period" => req.start_period, + "requested" => req.count, + "returned" => lc_updates_sent + ); + } else { + debug!( + self.log, + "LightClientUpdatesByRange outgoing response processed"; + "peer" => %peer_id, + "start_period" => req.start_period, + "requested" => req.count, + "returned" => lc_updates_sent + ); + } + + Ok(()) + } + /// Handle a `LightClientBootstrap` request from the peer. pub fn handle_light_client_bootstrap( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index dcad6160b3..82d06c20f8 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -327,34 +327,37 @@ impl NetworkBeaconProcessor { _seen_timestamp: Duration, process_type: BlockProcessType, ) { - let result = self + let mut result = self .chain .process_rpc_custody_columns(custody_columns) .await; match &result { - Ok((availability, data_columns_to_publish)) => { - self.handle_data_columns_to_publish(data_columns_to_publish.clone()); - - match availability { - AvailabilityProcessingStatus::Imported(hash) => { - debug!( - self.log, - "Block components retrieved"; - "result" => "imported block and custody columns", - "block_hash" => %hash, - ); - self.chain.recompute_head_at_current_slot().await; - } - AvailabilityProcessingStatus::MissingComponents(_, _) => { - debug!( - self.log, - "Missing components over rpc"; - "block_hash" => 
%block_root, - ); + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components retrieved"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Missing components over rpc"; + "block_hash" => %block_root, + ); + // Attempt reconstruction here before notifying sync, to avoid sending out more requests + // that we may no longer need. + if let Some(availability) = + self.attempt_data_column_reconstruction(block_root).await + { + result = Ok(availability) } } - } + }, Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, @@ -374,7 +377,7 @@ impl NetworkBeaconProcessor { self.send_sync_message(SyncMessage::BlockComponentProcessed { process_type, - result: result.map(|(r, _)| r).into(), + result: result.into(), }); } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index f05cb01fa4..e1badfda9d 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -311,6 +311,17 @@ impl Router { rpc_request.id, ), ), + RequestType::LightClientUpdatesByRange(request) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor + .send_light_client_updates_by_range_request( + peer_id, + request_id.0, + request_id.1, + rpc_request.id, + request, + ), + ), _ => {} } } @@ -351,7 +362,8 @@ impl Router { // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) - | Response::LightClientFinalityUpdate(_) => unreachable!(), + | Response::LightClientFinalityUpdate(_) + | Response::LightClientUpdatesByRange(_) => unreachable!(), } } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index f5e68d1512..5a11bca481 100644 --- 
a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -50,8 +50,6 @@ use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; pub mod common; pub mod parent_chain; mod single_block_lookup; -#[cfg(test)] -mod tests; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a2544b82b5..882f199b52 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -354,14 +354,12 @@ impl SyncManager { } #[cfg(test)] - pub(crate) fn assert_sampling_request_status( + pub(crate) fn get_sampling_request_status( &self, block_root: Hash256, - ongoing: &Vec, - no_peers: &Vec, - ) { - self.sampling - .assert_sampling_request_status(block_root, ongoing, no_peers); + index: &ColumnIndex, + ) -> Option { + self.sampling.get_request_status(block_root, index) } fn network_globals(&self) -> &NetworkGlobals { @@ -474,13 +472,9 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } - SyncRequestId::DataColumnsByRoot(req_id, requester) => self - .on_data_columns_by_root_response( - req_id, - requester, - peer_id, - RpcEvent::RPCError(error), - ), + SyncRequestId::DataColumnsByRoot(req_id) => { + self.on_data_columns_by_root_response(req_id, peer_id, RpcEvent::RPCError(error)) + } SyncRequestId::RangeBlockAndBlobs { id } => { if let Some(sender_id) = self.network.range_request_failed(id) { match sender_id { @@ -1106,10 +1100,9 @@ impl SyncManager { seen_timestamp: Duration, ) { match request_id { - SyncRequestId::DataColumnsByRoot(req_id, requester) => { + SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response( req_id, - requester, peer_id, 
match data_column { Some(data_column) => RpcEvent::Response(data_column, seen_timestamp), @@ -1151,7 +1144,6 @@ impl SyncManager { fn on_data_columns_by_root_response( &mut self, req_id: DataColumnsByRootRequestId, - requester: DataColumnsByRootRequester, peer_id: PeerId, data_column: RpcEvent>>, ) { @@ -1159,7 +1151,7 @@ impl SyncManager { self.network .on_data_columns_by_root_response(req_id, peer_id, data_column) { - match requester { + match req_id.requester { DataColumnsByRootRequester::Sampling(id) => { if let Some((requester, result)) = self.sampling diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 1dca6f02ac..0f5fd6fb9f 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -9,6 +9,8 @@ mod network_context; mod peer_sampling; mod peer_sync_info; mod range_sync; +#[cfg(test)] +mod tests; pub use lighthouse_network::service::api_types::SamplingId; pub use manager::{BatchProcessResult, SyncMessage}; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 9f9a189817..5f7778ffcc 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -2,7 +2,6 @@ //! channel and stores a global RPC ID to perform requests. 
use self::custody::{ActiveCustodyRequest, Error as CustodyRequestError}; -use self::requests::{ActiveBlobsByRootRequest, ActiveBlocksByRootRequest}; pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlockRequest}; use super::block_sidecar_coupling::RangeBlockComponentsRequest; use super::manager::BlockProcessType; @@ -30,8 +29,11 @@ use lighthouse_network::service::api_types::{ use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; use rand::seq::SliceRandom; use rand::thread_rng; -use requests::ActiveDataColumnsByRootRequest; pub use requests::LookupVerifyError; +use requests::{ + ActiveRequests, BlobsByRootRequestItems, BlocksByRootRequestItems, + DataColumnsByRootRequestItems, +}; use slog::{debug, error, warn}; use std::collections::hash_map::Entry; use std::collections::HashMap; @@ -180,18 +182,17 @@ pub struct SyncNetworkContext { request_id: Id, /// A mapping of active BlocksByRoot requests, including both current slot and parent lookups. - blocks_by_root_requests: FnvHashMap, - + blocks_by_root_requests: + ActiveRequests>, /// A mapping of active BlobsByRoot requests, including both current slot and parent lookups. 
- blobs_by_root_requests: FnvHashMap>, + blobs_by_root_requests: ActiveRequests>, + /// A mapping of active DataColumnsByRoot requests + data_columns_by_root_requests: + ActiveRequests>, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, - /// A mapping of active DataColumnsByRoot requests - data_columns_by_root_requests: - FnvHashMap>, - /// BlocksByRange requests paired with BlobsByRange range_block_components_requests: FnvHashMap)>, @@ -239,9 +240,9 @@ impl SyncNetworkContext { network_send, execution_engine_state: EngineState::Online, // always assume `Online` at the start request_id: 1, - blocks_by_root_requests: <_>::default(), - blobs_by_root_requests: <_>::default(), - data_columns_by_root_requests: <_>::default(), + blocks_by_root_requests: ActiveRequests::new("blocks_by_root"), + blobs_by_root_requests: ActiveRequests::new("blobs_by_root"), + data_columns_by_root_requests: ActiveRequests::new("data_columns_by_root"), custody_by_root_requests: <_>::default(), range_block_components_requests: FnvHashMap::default(), network_beacon_processor, @@ -270,34 +271,19 @@ impl SyncNetworkContext { let failed_block_ids = self .blocks_by_root_requests - .iter() - .filter_map(|(id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::SingleBlock { id: *id }) - } else { - None - } - }); + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| SyncRequestId::SingleBlock { id: *id }); let failed_blob_ids = self .blobs_by_root_requests - .iter() - .filter_map(|(id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::SingleBlob { id: *id }) - } else { - None - } - }); - let failed_data_column_by_root_ids = - self.data_columns_by_root_requests - .iter() - .filter_map(|(req_id, request)| { - if request.peer_id == *peer_id { - Some(SyncRequestId::DataColumnsByRoot(*req_id, request.requester)) - } else { - None - } - }); + .active_requests_of_peer(peer_id) + .into_iter() + .map(|id| 
SyncRequestId::SingleBlob { id: *id }); + let failed_data_column_by_root_ids = self + .data_columns_by_root_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::DataColumnsByRoot(*req_id)); failed_range_ids .chain(failed_block_ids) @@ -615,8 +601,14 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - self.blocks_by_root_requests - .insert(id, ActiveBlocksByRootRequest::new(request, peer_id)); + self.blocks_by_root_requests.insert( + id, + peer_id, + // true = enforce max_requests as returned for blocks_by_root. We always request a single + // block and the peer must have it. + true, + BlocksByRootRequestItems::new(request), + ); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -676,8 +668,15 @@ impl SyncNetworkContext { }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; - self.blobs_by_root_requests - .insert(id, ActiveBlobsByRootRequest::new(request, peer_id)); + self.blobs_by_root_requests.insert( + id, + peer_id, + // true = enforce max_requests are returned for blobs_by_root. We only issue requests for + // blocks after we know the block has data, and only request peers after they claim to + // have imported the block+blobs. 
+ true, + BlobsByRootRequestItems::new(request), + ); Ok(LookupRequestResult::RequestSent(req_id)) } @@ -688,8 +687,12 @@ impl SyncNetworkContext { requester: DataColumnsByRootRequester, peer_id: PeerId, request: DataColumnsByRootSingleBlockRequest, + expect_max_responses: bool, ) -> Result, &'static str> { - let req_id = DataColumnsByRootRequestId(self.next_id()); + let req_id = DataColumnsByRootRequestId { + id: self.next_id(), + requester, + }; debug!( self.log, "Sending DataColumnsByRoot Request"; @@ -704,12 +707,14 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, request: RequestType::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id, requester)), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id)), })?; self.data_columns_by_root_requests.insert( req_id, - ActiveDataColumnsByRootRequest::new(request, peer_id, requester), + peer_id, + expect_max_responses, + DataColumnsByRootRequestItems::new(request), ); Ok(LookupRequestResult::RequestSent(req_id)) @@ -915,142 +920,74 @@ impl SyncNetworkContext { // Request handlers - pub fn on_single_block_response( + pub(crate) fn on_single_block_response( &mut self, - request_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>> { - let Entry::Occupied(mut request) = self.blocks_by_root_requests.entry(request_id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blocks_by_root"]); - return None; - }; - - let resp = match rpc_event { - RpcEvent::Response(block, seen_timestamp) => { - match request.get_mut().add_response(block) { - Ok(block) => Ok((block, seen_timestamp)), - Err(e) => { - // The request must be dropped after receiving an error. 
- request.remove(); - Err(e.into()) - } + let response = self.blocks_by_root_requests.on_response(id, rpc_event); + let response = response.map(|res| { + res.and_then(|(mut blocks, seen_timestamp)| { + // Enforce that exactly one chunk = one block is returned. ReqResp behavior limits the + // response count to at most 1. + match blocks.pop() { + Some(block) => Ok((block, seen_timestamp)), + // Should never happen, `blocks_by_root_requests` enforces that we receive at least + // 1 chunk. + None => Err(LookupVerifyError::NotEnoughResponsesReturned { actual: 0 }.into()), } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - Err(e) => Err(e.into()), - }, - RpcEvent::RPCError(e) => { - request.remove(); - Err(e.into()) - } - }; - - if let Err(RpcResponseError::VerifyError(e)) = &resp { + }) + }); + if let Some(Err(RpcResponseError::VerifyError(e))) = &response { self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } - Some(resp) + response } - pub fn on_single_blob_response( + pub(crate) fn on_single_blob_response( &mut self, - request_id: SingleLookupReqId, + id: SingleLookupReqId, peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>> { - let Entry::Occupied(mut request) = self.blobs_by_root_requests.entry(request_id) else { - metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["blobs_by_root"]); - return None; - }; - - let resp = match rpc_event { - RpcEvent::Response(blob, seen_timestamp) => { - let request = request.get_mut(); - match request.add_response(blob) { - Ok(Some(blobs)) => to_fixed_blob_sidecar_list(blobs) - .map(|blobs| (blobs, seen_timestamp)) - .map_err(|e| (e.into(), request.resolve())), - Ok(None) => return None, - Err(e) => Err((e.into(), request.resolve())), - } - } - RpcEvent::StreamTermination => match request.remove().terminate() { - Ok(_) => return None, - // (err, false = not resolved) because terminate returns Ok() if resolved - Err(e) => Err((e.into(), false)), - 
}, - RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), - }; - - match resp { - Ok(resp) => Some(Ok(resp)), - // Track if this request has already returned some value downstream. Ensure that - // downstream code only receives a single Result per request. If the serving peer does - // multiple penalizable actions per request, downscore and return None. This allows to - // catch if a peer is returning more blobs than requested or if the excess blobs are - // invalid. - Err((e, resolved)) => { - if let RpcResponseError::VerifyError(e) = &e { - self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - if resolved { - None - } else { - Some(Err(e)) - } - } + let response = self.blobs_by_root_requests.on_response(id, rpc_event); + let response = response.map(|res| { + res.and_then( + |(blobs, seen_timestamp)| match to_fixed_blob_sidecar_list(blobs) { + Ok(blobs) => Ok((blobs, seen_timestamp)), + Err(e) => Err(e.into()), + }, + ) + }); + if let Some(Err(RpcResponseError::VerifyError(e))) = &response { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } + response } #[allow(clippy::type_complexity)] - pub fn on_data_columns_by_root_response( + pub(crate) fn on_data_columns_by_root_response( &mut self, id: DataColumnsByRootRequestId, - _peer_id: PeerId, + peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>>> { - let Entry::Occupied(mut request) = self.data_columns_by_root_requests.entry(id) else { - return None; - }; + let resp = self + .data_columns_by_root_requests + .on_response(id, rpc_event); + self.report_rpc_response_errors(resp, peer_id) + } - let resp = match rpc_event { - RpcEvent::Response(data_column, seen_timestamp) => { - let request = request.get_mut(); - match request.add_response(data_column) { - Ok(Some(data_columns)) => Ok((data_columns, seen_timestamp)), - Ok(None) => return None, - Err(e) => Err((e.into(), request.resolve())), - } - } - RpcEvent::StreamTermination => match 
request.remove().terminate() { - Ok(_) => return None, - // (err, false = not resolved) because terminate returns Ok() if resolved - Err(e) => Err((e.into(), false)), - }, - RpcEvent::RPCError(e) => Err((e.into(), request.remove().resolve())), - }; - - match resp { - Ok(resp) => Some(Ok(resp)), - // Track if this request has already returned some value downstream. Ensure that - // downstream code only receives a single Result per request. If the serving peer does - // multiple penalizable actions per request, downscore and return None. This allows to - // catch if a peer is returning more columns than requested or if the excess blobs are - // invalid. - Err((e, resolved)) => { - if let RpcResponseError::VerifyError(_e) = &e { - // TODO(das): this is a bug, we should not penalise peer in this case. - // confirm this can be removed. - // self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - if resolved { - None - } else { - Some(Err(e)) - } - } + fn report_rpc_response_errors( + &mut self, + resp: Option>, + peer_id: PeerId, + ) -> Option> { + if let Some(Err(RpcResponseError::VerifyError(e))) = &resp { + self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } + resp } /// Insert a downloaded column into an active custody request. Then make progress on the diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index 6736bfb82f..e4bce3dafc 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -283,6 +283,10 @@ impl ActiveCustodyRequest { block_root: self.block_root, indices: indices.clone(), }, + // true = enforce max_requests are returned data_columns_by_root. 
We only issue requests + // for blocks after we know the block has data, and only request peers after they claim to + // have imported the block+columns and claim to be custodians + true, ) .map_err(Error::SendFailed)?; diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 0c2f59d143..b9214bafcd 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -1,23 +1,187 @@ +use std::{collections::hash_map::Entry, hash::Hash}; + +use beacon_chain::validator_monitor::timestamp_now; +use fnv::FnvHashMap; +use lighthouse_network::PeerId; use strum::IntoStaticStr; use types::Hash256; -pub use blobs_by_root::{ActiveBlobsByRootRequest, BlobsByRootSingleBlockRequest}; -pub use blocks_by_root::{ActiveBlocksByRootRequest, BlocksByRootSingleRequest}; +pub use blobs_by_root::{BlobsByRootRequestItems, BlobsByRootSingleBlockRequest}; +pub use blocks_by_root::{BlocksByRootRequestItems, BlocksByRootSingleRequest}; pub use data_columns_by_root::{ - ActiveDataColumnsByRootRequest, DataColumnsByRootSingleBlockRequest, + DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; +use crate::metrics; + +use super::{RpcEvent, RpcResponseResult}; + mod blobs_by_root; mod blocks_by_root; mod data_columns_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] pub enum LookupVerifyError { - NoResponseReturned, - NotEnoughResponsesReturned { expected: usize, actual: usize }, + NotEnoughResponsesReturned { actual: usize }, TooManyResponses, UnrequestedBlockRoot(Hash256), UnrequestedIndex(u64), InvalidInclusionProof, DuplicateData, } + +/// Collection of active requests of a single ReqResp method, i.e. 
`blocks_by_root` +pub struct ActiveRequests { + requests: FnvHashMap>, + name: &'static str, +} + +/// Stateful container for a single active ReqResp request +struct ActiveRequest { + state: State, + peer_id: PeerId, + // Error if the request terminates before receiving max expected responses + expect_max_responses: bool, +} + +enum State { + Active(T), + CompletedEarly, + Errored, +} + +impl ActiveRequests { + pub fn new(name: &'static str) -> Self { + Self { + requests: <_>::default(), + name, + } + } + + pub fn insert(&mut self, id: K, peer_id: PeerId, expect_max_responses: bool, items: T) { + self.requests.insert( + id, + ActiveRequest { + state: State::Active(items), + peer_id, + expect_max_responses, + }, + ); + } + + /// Handle an `RpcEvent` for a specific request index by `id`. + /// + /// Lighthouse ReqResp protocol API promises to send 0 or more `RpcEvent::Response` chunks, + /// and EITHER a single `RpcEvent::RPCError` or RpcEvent::StreamTermination. + /// + /// Downstream code expects to receive a single `Result` value per request ID. However, + /// `add_item` may convert ReqResp success chunks into errors. This function handles the + /// multiple errors / stream termination internally ensuring that a single `Some` is + /// returned. + pub fn on_response( + &mut self, + id: K, + rpc_event: RpcEvent, + ) -> Option>> { + let Entry::Occupied(mut entry) = self.requests.entry(id) else { + metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &[self.name]); + return None; + }; + + match rpc_event { + // Handler of a success ReqResp chunk. Adds the item to the request accumulator. + // `ActiveRequestItems` validates the item before appending to its internal state. 
+ RpcEvent::Response(item, seen_timestamp) => { + let request = &mut entry.get_mut(); + match &mut request.state { + State::Active(items) => { + match items.add(item) { + // Received all items we are expecting for, return early, but keep the request + // struct to handle the stream termination gracefully. + Ok(true) => { + let items = items.consume(); + request.state = State::CompletedEarly; + Some(Ok((items, seen_timestamp))) + } + // Received item, but we are still expecting more + Ok(false) => None, + // Received an invalid item + Err(e) => { + request.state = State::Errored; + Some(Err(e.into())) + } + } + } + // Should never happen, ReqResp network behaviour enforces a max count of chunks + // When `max_remaining_chunks <= 1` a the inbound stream in terminated in + // `rpc/handler.rs`. Handling this case adds complexity for no gain. Even if an + // attacker could abuse this, there's no gain in sending garbage chunks that + // will be ignored anyway. + State::CompletedEarly => None, + // Ignore items after errors. We may want to penalize repeated invalid chunks + // for the same response. But that's an optimization to ban peers sending + // invalid data faster that we choose to not adopt for now. 
+ State::Errored => None, + } + } + RpcEvent::StreamTermination => { + // After stream termination we must forget about this request, there will be no more + // messages coming from the network + let request = entry.remove(); + match request.state { + // Received a stream termination in a valid sequence, consume items + State::Active(mut items) => { + if request.expect_max_responses { + Some(Err(LookupVerifyError::NotEnoughResponsesReturned { + actual: items.consume().len(), + } + .into())) + } else { + Some(Ok((items.consume(), timestamp_now()))) + } + } + // Items already returned, ignore stream termination + State::CompletedEarly => None, + // Returned an error earlier, ignore stream termination + State::Errored => None, + } + } + RpcEvent::RPCError(e) => { + // After an Error event from the network we must forget about this request as this + // may be the last message for this request. + match entry.remove().state { + // Received error while request is still active, propagate error. + State::Active(_) => Some(Err(e.into())), + // Received error after completing the request, ignore the error. This is okay + // because the network has already registered a downscore event if necessary for + // this message. + State::CompletedEarly => None, + // Received a network error after a validity error. Okay to ignore, see above + State::Errored => None, + } + } + } + } + + pub fn active_requests_of_peer(&self, peer_id: &PeerId) -> Vec<&K> { + self.requests + .iter() + .filter(|(_, request)| &request.peer_id == peer_id) + .map(|(id, _)| id) + .collect() + } + + pub fn len(&self) -> usize { + self.requests.len() + } +} + +pub trait ActiveRequestItems { + type Item; + + /// Add a new item into the accumulator. Returns true if all expected items have been received. + fn add(&mut self, item: Self::Item) -> Result; + + /// Return all accumulated items consuming them. 
+ fn consume(&mut self) -> Vec; +} diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs index cb2b1a42ec..fefb27a5ef 100644 --- a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -1,8 +1,8 @@ -use lighthouse_network::{rpc::methods::BlobsByRootRequest, PeerId}; +use lighthouse_network::rpc::methods::BlobsByRootRequest; use std::sync::Arc; use types::{blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Clone)] pub struct BlobsByRootSingleBlockRequest { @@ -25,34 +25,27 @@ impl BlobsByRootSingleBlockRequest { } } -pub struct ActiveBlobsByRootRequest { +pub struct BlobsByRootRequestItems { request: BlobsByRootSingleBlockRequest, - blobs: Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, + items: Vec>>, } -impl ActiveBlobsByRootRequest { - pub fn new(request: BlobsByRootSingleBlockRequest, peer_id: PeerId) -> Self { +impl BlobsByRootRequestItems { + pub fn new(request: BlobsByRootSingleBlockRequest) -> Self { Self { request, - blobs: vec![], - resolved: false, - peer_id, + items: vec![], } } +} + +impl ActiveRequestItems for BlobsByRootRequestItems { + type Item = Arc>; /// Appends a chunk to this multi-item request. If all expected chunks are received, this /// method returns `Some`, resolving the request before the stream terminator. 
/// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - blob: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, blob: Self::Item) -> Result { let block_root = blob.block_root(); if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); @@ -63,34 +56,16 @@ impl ActiveBlobsByRootRequest { if !self.request.indices.contains(&blob.index) { return Err(LookupVerifyError::UnrequestedIndex(blob.index)); } - if self.blobs.iter().any(|b| b.index == blob.index) { + if self.items.iter().any(|b| b.index == blob.index) { return Err(LookupVerifyError::DuplicateData); } - self.blobs.push(blob); - if self.blobs.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.blobs))) - } else { - Ok(None) - } + self.items.push(blob); + + Ok(self.items.len() >= self.request.indices.len()) } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.blobs.len(), - }) - } - } - - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. 
- pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs index a15d4e3935..f3cdcbe714 100644 --- a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs @@ -1,9 +1,9 @@ use beacon_chain::get_block_root; -use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; +use lighthouse_network::rpc::BlocksByRootRequest; use std::sync::Arc; use types::{ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Copy, Clone)] pub struct BlocksByRootSingleRequest(pub Hash256); @@ -14,47 +14,38 @@ impl BlocksByRootSingleRequest { } } -pub struct ActiveBlocksByRootRequest { +pub struct BlocksByRootRequestItems { request: BlocksByRootSingleRequest, - resolved: bool, - pub(crate) peer_id: PeerId, + items: Vec>>, } -impl ActiveBlocksByRootRequest { - pub fn new(request: BlocksByRootSingleRequest, peer_id: PeerId) -> Self { +impl BlocksByRootRequestItems { + pub fn new(request: BlocksByRootSingleRequest) -> Self { Self { request, - resolved: false, - peer_id, + items: vec![], } } +} + +impl ActiveRequestItems for BlocksByRootRequestItems { + type Item = Arc>; /// Append a response to the single chunk request. If the chunk is valid, the request is /// resolved immediately. 
/// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - block: Arc>, - ) -> Result>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, block: Self::Item) -> Result { let block_root = get_block_root(&block); if self.request.0 != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); } - // Valid data, blocks by root expects a single response - self.resolved = true; - Ok(block) + self.items.push(block); + // Always returns true, blocks by root expects a single response + Ok(true) } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NoResponseReturned) - } + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs index a42ae7ca41..1b8d46ff07 100644 --- a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs @@ -1,9 +1,8 @@ -use lighthouse_network::service::api_types::DataColumnsByRootRequester; -use lighthouse_network::{rpc::methods::DataColumnsByRootRequest, PeerId}; +use lighthouse_network::rpc::methods::DataColumnsByRootRequest; use std::sync::Arc; use types::{ChainSpec, DataColumnIdentifier, DataColumnSidecar, EthSpec, Hash256}; -use super::LookupVerifyError; +use super::{ActiveRequestItems, LookupVerifyError}; #[derive(Debug, Clone)] pub struct DataColumnsByRootSingleBlockRequest { @@ -26,40 +25,27 @@ impl DataColumnsByRootSingleBlockRequest { } } -pub struct ActiveDataColumnsByRootRequest { +pub struct DataColumnsByRootRequestItems { request: DataColumnsByRootSingleBlockRequest, items: Vec>>, - resolved: bool, - pub(crate) peer_id: PeerId, - pub(crate) 
requester: DataColumnsByRootRequester, } -impl ActiveDataColumnsByRootRequest { - pub fn new( - request: DataColumnsByRootSingleBlockRequest, - peer_id: PeerId, - requester: DataColumnsByRootRequester, - ) -> Self { +impl DataColumnsByRootRequestItems { + pub fn new(request: DataColumnsByRootSingleBlockRequest) -> Self { Self { request, items: vec![], - resolved: false, - peer_id, - requester, } } +} + +impl ActiveRequestItems for DataColumnsByRootRequestItems { + type Item = Arc>; /// Appends a chunk to this multi-item request. If all expected chunks are received, this /// method returns `Some`, resolving the request before the stream terminator. /// The active request SHOULD be dropped after `add_response` returns an error - pub fn add_response( - &mut self, - data_column: Arc>, - ) -> Result>>>, LookupVerifyError> { - if self.resolved { - return Err(LookupVerifyError::TooManyResponses); - } - + fn add(&mut self, data_column: Self::Item) -> Result { let block_root = data_column.block_root(); if self.request.block_root != block_root { return Err(LookupVerifyError::UnrequestedBlockRoot(block_root)); @@ -75,29 +61,11 @@ impl ActiveDataColumnsByRootRequest { } self.items.push(data_column); - if self.items.len() >= self.request.indices.len() { - // All expected chunks received, return result early - self.resolved = true; - Ok(Some(std::mem::take(&mut self.items))) - } else { - Ok(None) - } + + Ok(self.items.len() >= self.request.indices.len()) } - pub fn terminate(self) -> Result<(), LookupVerifyError> { - if self.resolved { - Ok(()) - } else { - Err(LookupVerifyError::NotEnoughResponsesReturned { - expected: self.request.indices.len(), - actual: self.items.len(), - }) - } - } - - /// Mark request as resolved (= has returned something downstream) while marking this status as - /// true for future calls. 
- pub fn resolve(&mut self) -> bool { - std::mem::replace(&mut self.resolved, true) + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) } } diff --git a/beacon_node/network/src/sync/peer_sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs index 086fb0ec8d..7e725f5df5 100644 --- a/beacon_node/network/src/sync/peer_sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -1,4 +1,6 @@ use self::request::ActiveColumnSampleRequest; +#[cfg(test)] +pub(crate) use self::request::Status; use super::network_context::{ DataColumnsByRootSingleBlockRequest, RpcResponseError, SyncNetworkContext, }; @@ -43,15 +45,15 @@ impl Sampling { } #[cfg(test)] - pub fn assert_sampling_request_status( + pub fn get_request_status( &self, block_root: Hash256, - ongoing: &Vec, - no_peers: &Vec, - ) { + index: &ColumnIndex, + ) -> Option { let requester = SamplingRequester::ImportedBlock(block_root); - let active_sampling_request = self.requests.get(&requester).unwrap(); - active_sampling_request.assert_sampling_request_status(ongoing, no_peers); + self.requests + .get(&requester) + .and_then(|req| req.get_request_status(index)) } /// Create a new sampling request for a known block @@ -86,7 +88,11 @@ impl Sampling { } }; - debug!(self.log, "Created new sample request"; "id" => ?id); + debug!(self.log, + "Created new sample request"; + "id" => ?id, + "column_selection" => ?request.column_selection() + ); // TOOD(das): If a node has very little peers, continue_sampling() will attempt to find enough // to sample here, immediately failing the sampling request. 
There should be some grace @@ -233,18 +239,17 @@ impl ActiveSamplingRequest { } #[cfg(test)] - pub fn assert_sampling_request_status( - &self, - ongoing: &Vec, - no_peers: &Vec, - ) { - for idx in ongoing { - assert!(self.column_requests.get(idx).unwrap().is_ongoing()); - } + pub fn get_request_status(&self, index: &ColumnIndex) -> Option { + self.column_requests.get(index).map(|req| req.status()) + } - for idx in no_peers { - assert!(self.column_requests.get(idx).unwrap().is_no_peers()); - } + /// Return the current ordered list of columns that this request has to sample to succeed + pub(crate) fn column_selection(&self) -> Vec { + self.column_shuffle + .iter() + .take(REQUIRED_SUCCESSES[0]) + .copied() + .collect() + } /// Insert a downloaded column into an active sampling request. Then make progress on the @@ -539,6 +544,10 @@ impl ActiveSamplingRequest { block_root: self.block_root, indices: column_indexes.clone(), }, + // false = We issue request to custodians who may or may not have received the + // samples yet. We don't have any signal (like an attestation or status messages that the + // custodian has received data). 
+ false, ) .map_err(SamplingError::SendFailed)?; self.column_indexes_by_sampling_request @@ -584,8 +593,9 @@ mod request { peers_dont_have: HashSet, } + // Exposed only for testing assertions in lookup tests #[derive(Debug, Clone)] - enum Status { + pub(crate) enum Status { NoPeers, NotStarted, Sampling(PeerId), @@ -630,8 +640,8 @@ mod request { } #[cfg(test)] - pub(crate) fn is_no_peers(&self) -> bool { - matches!(self.status, Status::NoPeers) + pub(crate) fn status(&self) -> Status { + self.status.clone() } pub(crate) fn choose_peer( diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 732e4a7bd1..51d9d9da37 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -8,9 +8,9 @@ use crate::sync::{network_context::SyncNetworkContext, BatchOperationOutcome, Ba use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use fnv::FnvHashMap; -use lighthouse_metrics::set_int_gauge; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; +use metrics::set_int_gauge; use rand::seq::SliceRandom; use rand::Rng; use slog::{crit, debug, o, warn}; diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/tests/lookups.rs similarity index 94% rename from beacon_node/network/src/sync/block_lookups/tests.rs rename to beacon_node/network/src/sync/tests/lookups.rs index 0ed624fc0d..9f2c9ef66f 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -1,97 +1,50 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; -use crate::sync::manager::{BlockProcessType, SyncManager}; -use crate::sync::peer_sampling::SamplingConfig; -use crate::sync::range_sync::RangeSyncType; -use crate::sync::{SamplingId, SyncMessage}; +use crate::sync::block_lookups::{ + BlockLookupSummary, 
PARENT_DEPTH_TOLERANCE, SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS, +}; +use crate::sync::{ + manager::{BlockProcessType, BlockProcessingResult, SyncManager}, + peer_sampling::SamplingConfig, + SamplingId, SyncMessage, +}; use crate::NetworkMessage; use std::sync::Arc; +use std::time::Duration; use super::*; use crate::sync::block_lookups::common::ResponseType; -use beacon_chain::blob_verification::GossipVerifiedBlob; -use beacon_chain::block_verification_types::BlockImportData; -use beacon_chain::builder::Witness; -use beacon_chain::data_availability_checker::Availability; -use beacon_chain::eth1_chain::CachingEth1Backend; -use beacon_chain::test_utils::{ - build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, - BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, -}; -use beacon_chain::validator_monitor::timestamp_now; use beacon_chain::{ - AvailabilityPendingExecutedBlock, PayloadVerificationOutcome, PayloadVerificationStatus, + blob_verification::GossipVerifiedBlob, + block_verification_types::{AsBlock, BlockImportData}, + data_availability_checker::Availability, + test_utils::{ + build_log, generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, + BeaconChainHarness, EphemeralHarnessType, LoggerType, NumBlobs, + }, + validator_monitor::timestamp_now, + AvailabilityPendingExecutedBlock, AvailabilityProcessingStatus, BlockError, + PayloadVerificationOutcome, PayloadVerificationStatus, }; use beacon_processor::WorkEvent; -use lighthouse_network::rpc::{RPCError, RequestType, RpcErrorResponse}; -use lighthouse_network::service::api_types::{ - AppRequestId, DataColumnsByRootRequester, Id, SamplingRequester, SingleLookupReqId, - SyncRequestId, +use lighthouse_network::{ + rpc::{RPCError, RequestType, RpcErrorResponse}, + service::api_types::{ + AppRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, + SamplingRequester, SingleLookupReqId, SyncRequestId, + }, + types::SyncState, + 
NetworkConfig, NetworkGlobals, PeerId, }; -use lighthouse_network::types::SyncState; -use lighthouse_network::NetworkConfig; -use lighthouse_network::NetworkGlobals; use slog::info; -use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock}; -use store::MemoryStore; +use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; -use types::data_column_sidecar::ColumnIndex; -use types::test_utils::TestRandom; use types::{ - test_utils::{SeedableRng, XorShiftRng}, - BlobSidecar, ForkName, MinimalEthSpec as E, SignedBeaconBlock, Slot, + data_column_sidecar::ColumnIndex, + test_utils::{SeedableRng, TestRandom, XorShiftRng}, + BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ForkName, + Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, }; -use types::{BeaconState, BeaconStateBase}; -use types::{DataColumnSidecar, Epoch}; - -type T = Witness, E, MemoryStore, MemoryStore>; - -/// This test utility enables integration testing of Lighthouse sync components. -/// -/// It covers the following: -/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours. -/// 2. Making assertions on `WorkEvent`s received from sync -/// 3. Making assertion on `NetworkMessage` received from sync (Outgoing RPC requests). -/// -/// The test utility covers testing the interactions from and to `SyncManager`. In diagram form: -/// +-----------------+ -/// | BeaconProcessor | -/// +---------+-------+ -/// ^ | -/// | | -/// WorkEvent | | SyncMsg -/// | | (Result) -/// | v -/// +--------+ +-----+-----------+ +----------------+ -/// | Router +----------->| SyncManager +------------>| NetworkService | -/// +--------+ SyncMsg +-----------------+ NetworkMsg +----------------+ -/// (RPC resp) | - RangeSync | (RPC req) -/// +-----------------+ -/// | - BackFillSync | -/// +-----------------+ -/// | - BlockLookups | -/// +-----------------+ -struct TestRig { - /// Receiver for `BeaconProcessor` events (e.g. 
block processing results). - beacon_processor_rx: mpsc::Receiver>, - beacon_processor_rx_queue: Vec>, - /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) - network_rx: mpsc::UnboundedReceiver>, - /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) - network_rx_queue: Vec>, - /// Receiver for `SyncMessage` from the network - sync_rx: mpsc::UnboundedReceiver>, - /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. - sync_manager: SyncManager, - /// To manipulate sync state and peer connection status - network_globals: Arc>, - /// Beacon chain harness - harness: BeaconChainHarness>, - /// `rng` for generating test blocks and blobs. - rng: XorShiftRng, - fork_name: ForkName, - log: Logger, -} const D: Duration = Duration::new(0, 0); const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS; @@ -745,10 +698,10 @@ impl TestRig { let first_dc = data_columns.first().unwrap(); let block_root = first_dc.block_root(); let sampling_request_id = match id.0 { - SyncRequestId::DataColumnsByRoot( - _, - _requester @ DataColumnsByRootRequester::Sampling(sampling_id), - ) => sampling_id.sampling_request_id, + SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { + requester: DataColumnsByRootRequester::Sampling(sampling_id), + .. + }) => sampling_id.sampling_request_id, _ => unreachable!(), }; self.complete_data_columns_by_root_request(id, data_columns); @@ -773,14 +726,15 @@ impl TestRig { data_columns: Vec>>, missing_components: bool, ) { - let lookup_id = - if let SyncRequestId::DataColumnsByRoot(_, DataColumnsByRootRequester::Custody(id)) = - ids.first().unwrap().0 - { - id.requester.0.lookup_id - } else { - panic!("not a custody requester") - }; + let lookup_id = if let SyncRequestId::DataColumnsByRoot(DataColumnsByRootRequestId { + requester: DataColumnsByRootRequester::Custody(id), + .. 
+ }) = ids.first().unwrap().0 + { + id.requester.0.lookup_id + } else { + panic!("not a custody requester") + }; let first_column = data_columns.first().cloned().unwrap(); @@ -1189,6 +1143,7 @@ impl TestRig { penalty_msg, expect_penalty_msg, "Unexpected penalty msg for {peer_id}" ); + self.log(&format!("Found expected penalty {penalty_msg}")); } pub fn expect_single_penalty(&mut self, peer_id: PeerId, expect_penalty_msg: &'static str) { @@ -1319,14 +1274,44 @@ impl TestRig { }); } - fn assert_sampling_request_status( - &self, - block_root: Hash256, - ongoing: &Vec, - no_peers: &Vec, - ) { - self.sync_manager - .assert_sampling_request_status(block_root, ongoing, no_peers) + fn assert_sampling_request_ongoing(&self, block_root: Hash256, indices: &[ColumnIndex]) { + for index in indices { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + if !matches!(status, crate::sync::peer_sampling::Status::Sampling { .. }) { + panic!("expected {block_root} {index} request to be on going: {status:?}"); + } + } + } + + fn assert_sampling_request_nopeers(&self, block_root: Hash256, indices: &[ColumnIndex]) { + for index in indices { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + if !matches!(status, crate::sync::peer_sampling::Status::NoPeers { .. 
}) { + panic!("expected {block_root} {index} request to be no peers: {status:?}"); + } + } + } + + fn log_sampling_requests(&self, block_root: Hash256, indices: &[ColumnIndex]) { + let statuses = indices + .iter() + .map(|index| { + let status = self + .sync_manager + .get_sampling_request_status(block_root, index) + .unwrap_or_else(|| panic!("No request state for {index}")); + (index, status) + }) + .collect::>(); + self.log(&format!( + "Sampling request status for {block_root}: {statuses:?}" + )); } } @@ -1386,7 +1371,7 @@ fn test_single_block_lookup_empty_response() { // The peer does not have the block. It should be penalized. r.single_lookup_block_response(id, peer_id, None); - r.expect_penalty(peer_id, "NoResponseReturned"); + r.expect_penalty(peer_id, "NotEnoughResponsesReturned"); // it should be retried let id = r.expect_block_lookup_request(block_root); // Send the right block this time. @@ -2099,7 +2084,7 @@ fn sampling_batch_requests() { .pop() .unwrap(); assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); - r.assert_sampling_request_status(block_root, &column_indexes, &vec![]); + r.assert_sampling_request_ongoing(block_root, &column_indexes); // Resolve the request. r.complete_valid_sampling_column_requests( @@ -2127,10 +2112,10 @@ fn sampling_batch_requests_not_enough_responses_returned() { assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); // The request status should be set to Sampling. - r.assert_sampling_request_status(block_root, &column_indexes, &vec![]); + r.assert_sampling_request_ongoing(block_root, &column_indexes); // Split the indexes to simulate the case where the supernode doesn't have the requested column. - let (_column_indexes_supernode_does_not_have, column_indexes_to_complete) = + let (column_indexes_supernode_does_not_have, column_indexes_to_complete) = column_indexes.split_at(1); // Complete the requests but only partially, so a NotEnoughResponsesReturned error occurs. 
@@ -2145,7 +2130,8 @@ fn sampling_batch_requests_not_enough_responses_returned() { ); // The request status should be set to NoPeers since the supernode, the only peer, returned not enough responses. - r.assert_sampling_request_status(block_root, &vec![], &column_indexes); + r.log_sampling_requests(block_root, &column_indexes); + r.assert_sampling_request_nopeers(block_root, column_indexes_supernode_does_not_have); // The sampling request stalls. r.expect_empty_network(); @@ -2690,11 +2676,6 @@ mod deneb_only { self.blobs.pop().expect("blobs"); self } - fn invalidate_blobs_too_many(mut self) -> Self { - let first_blob = self.blobs.first().expect("blob").clone(); - self.blobs.push(first_blob); - self - } fn expect_block_process(mut self) -> Self { self.rig.expect_block_process(ResponseType::Block); self @@ -2783,21 +2764,6 @@ mod deneb_only { .expect_no_block_request(); } - #[test] - fn single_block_response_then_too_many_blobs_response_attestation() { - let Some(tester) = DenebTester::new(RequestTrigger::AttestationUnknownBlock) else { - return; - }; - tester - .block_response_triggering_process() - .invalidate_blobs_too_many() - .blobs_response() - .expect_penalty("TooManyResponses") - // Network context returns "download success" because the request has enough blobs + it - // downscores the peer for returning too many. 
- .expect_no_block_request(); - } - // Test peer returning block that has unknown parent, and a new lookup is created #[test] fn parent_block_unknown_parent() { @@ -2838,7 +2804,7 @@ mod deneb_only { }; tester .empty_block_response() - .expect_penalty("NoResponseReturned") + .expect_penalty("NotEnoughResponsesReturned") .expect_block_request() .expect_no_blobs_request() .block_response_and_expect_blob_request() diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs new file mode 100644 index 0000000000..47666b413c --- /dev/null +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -0,0 +1,67 @@ +use crate::sync::manager::SyncManager; +use crate::sync::range_sync::RangeSyncType; +use crate::sync::SyncMessage; +use crate::NetworkMessage; +use beacon_chain::builder::Witness; +use beacon_chain::eth1_chain::CachingEth1Backend; +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use beacon_processor::WorkEvent; +use lighthouse_network::NetworkGlobals; +use slog::Logger; +use slot_clock::ManualSlotClock; +use std::sync::Arc; +use store::MemoryStore; +use tokio::sync::mpsc; +use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; + +mod lookups; +mod range; + +type T = Witness, E, MemoryStore, MemoryStore>; + +/// This test utility enables integration testing of Lighthouse sync components. +/// +/// It covers the following: +/// 1. Sending `SyncMessage` to `SyncManager` to trigger `RangeSync`, `BackFillSync` and `BlockLookups` behaviours. +/// 2. Making assertions on `WorkEvent`s received from sync +/// 3. Making assertion on `NetworkMessage` received from sync (Outgoing RPC requests). +/// +/// The test utility covers testing the interactions from and to `SyncManager`. 
In diagram form: +/// +-----------------+ +/// | BeaconProcessor | +/// +---------+-------+ +/// ^ | +/// | | +/// WorkEvent | | SyncMsg +/// | | (Result) +/// | v +/// +--------+ +-----+-----------+ +----------------+ +/// | Router +----------->| SyncManager +------------>| NetworkService | +/// +--------+ SyncMsg +-----------------+ NetworkMsg +----------------+ +/// (RPC resp) | - RangeSync | (RPC req) +/// +-----------------+ +/// | - BackFillSync | +/// +-----------------+ +/// | - BlockLookups | +/// +-----------------+ +struct TestRig { + /// Receiver for `BeaconProcessor` events (e.g. block processing results). + beacon_processor_rx: mpsc::Receiver>, + beacon_processor_rx_queue: Vec>, + /// Receiver for `NetworkMessage` (e.g. outgoing RPC requests from sync) + network_rx: mpsc::UnboundedReceiver>, + /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) + network_rx_queue: Vec>, + /// Receiver for `SyncMessage` from the network + sync_rx: mpsc::UnboundedReceiver>, + /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. + sync_manager: SyncManager, + /// To manipulate sync state and peer connection status + network_globals: Arc>, + /// Beacon chain harness + harness: BeaconChainHarness>, + /// `rng` for generating test blocks and blobs. 
+ rng: XorShiftRng, + fork_name: ForkName, + log: Logger, +} diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/beacon_node/network/src/sync/tests/range.rs @@ -0,0 +1 @@ + diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index cbf6284f2a..5b48e3f0d8 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -7,7 +7,7 @@ edition = { workspace = true } [dependencies] derivative = { workspace = true } itertools = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } types = { workspace = true } state_processing = { workspace = true } @@ -25,4 +25,4 @@ tokio = { workspace = true } maplit = { workspace = true } [features] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/beacon_node/operation_pool/src/attestation_id.rs b/beacon_node/operation_pool/src/attestation_id.rs deleted file mode 100644 index f0dc6536a5..0000000000 --- a/beacon_node/operation_pool/src/attestation_id.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; - -/// Serialized `AttestationData` augmented with a domain to encode the fork info. -/// -/// [DEPRECATED] To be removed once all nodes have updated to schema v12. 
-#[derive( - PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode, Serialize, Deserialize, -)] -pub struct AttestationId { - v: Vec, -} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 0b032b0c8a..3a002bf870 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1,5 +1,4 @@ mod attestation; -mod attestation_id; mod attestation_storage; mod attester_slashing; mod bls_to_execution_changes; diff --git a/beacon_node/operation_pool/src/metrics.rs b/beacon_node/operation_pool/src/metrics.rs index e2a8b43ed1..14088688e5 100644 --- a/beacon_node/operation_pool/src/metrics.rs +++ b/beacon_node/operation_pool/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static BUILD_REWARD_CACHE_TIME: LazyLock> = LazyLock::new(|| { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 1e9611fd1e..dff030fb0f 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -401,15 +401,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("self-limiter") - .long("self-limiter") - .help("This flag is deprecated and has no effect.") - .hide(true) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("disable-self-limiter") .long("disable-self-limiter") @@ -525,16 +516,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("http-spec-fork") - .long("http-spec-fork") - .requires("enable_http") - .value_name("FORK") - .help("This flag is deprecated and has no effect.") - .hide(true) - .action(ArgAction::Set) - .display_order(0) - ) .arg( Arg::new("http-enable-tls") .long("http-enable-tls") @@ -564,16 +545,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("http-allow-sync-stalled") - .long("http-allow-sync-stalled") - 
.action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .requires("enable_http") - .help("This flag is deprecated and has no effect.") - .hide(true) - .display_order(0) - ) .arg( Arg::new("http-sse-capacity-multiplier") .long("http-sse-capacity-multiplier") @@ -659,7 +630,15 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - + .arg( + Arg::new("idontwant-message-size-threshold") + .long("idontwant-message-size-threshold") + .help("Specifies the minimum message size for which IDONTWANT messages are sent. \ + This an optimization strategy to not send IDONTWANT messages for smaller messages.") + .action(ArgAction::Set) + .hide(true) + .display_order(0) + ) /* * Monitoring metrics */ @@ -1283,14 +1262,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("disable-lock-timeouts") - .long("disable-lock-timeouts") - .help("This flag is deprecated and has no effect.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("disable-proposer-reorgs") .long("disable-proposer-reorgs") @@ -1503,14 +1474,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("always-prefer-builder-payload") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .long("always-prefer-builder-payload") - .help("This flag is deprecated and has no effect.") - .display_order(0) - ) .arg( Arg::new("invalid-gossip-verified-blocks-path") .action(ArgAction::Set) @@ -1522,14 +1485,6 @@ pub fn cli_app() -> Command { filling up their disks.") .display_order(0) ) - .arg( - Arg::new("progressive-balances") - .long("progressive-balances") - .value_name("MODE") - .help("Deprecated. 
This optimisation is now the default and cannot be disabled.") - .action(ArgAction::Set) - .display_order(0) - ) .arg( Arg::new("beacon-processor-max-workers") .long("beacon-processor-max-workers") @@ -1591,13 +1546,5 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("disable-duplicate-warn-logs") - .long("disable-duplicate-warn-logs") - .help("This flag is deprecated and has no effect.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0eff8577c4..2d31815351 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -152,14 +152,6 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.get_one::("http-spec-fork").is_some() { - warn!( - log, - "Ignoring --http-spec-fork"; - "info" => "this flag is deprecated and will be removed" - ); - } - if cli_args.get_flag("http-enable-tls") { client_config.http_api.tls_config = Some(TlsConfig { cert: cli_args @@ -175,14 +167,6 @@ pub fn get_config( }); } - if cli_args.get_flag("http-allow-sync-stalled") { - warn!( - log, - "Ignoring --http-allow-sync-stalled"; - "info" => "this flag is deprecated and will be removed" - ); - } - client_config.http_api.sse_capacity_multiplier = parse_required(cli_args, "http-sse-capacity-multiplier")?; @@ -362,14 +346,6 @@ pub fn get_config( .map(Duration::from_millis); } - if cli_args.get_flag("always-prefer-builder-payload") { - warn!( - log, - "Ignoring --always-prefer-builder-payload"; - "info" => "this flag is deprecated and will be removed" - ); - } - // Set config values from parse values. 
el_config.secret_file = Some(secret_file.clone()); el_config.execution_endpoint = Some(execution_endpoint.clone()); @@ -787,14 +763,6 @@ pub fn get_config( .individual_tracking_threshold = count; } - if cli_args.get_flag("disable-lock-timeouts") { - warn!( - log, - "Ignoring --disable-lock-timeouts"; - "info" => "this flag is deprecated and will be removed" - ); - } - if cli_args.get_flag("disable-proposer-reorgs") { client_config.chain.re_org_head_threshold = None; client_config.chain.re_org_parent_threshold = None; @@ -894,14 +862,6 @@ pub fn get_config( client_config.network.invalid_block_storage = Some(path); } - if cli_args.get_one::("progressive-balances").is_some() { - warn!( - log, - "Progressive balances mode is deprecated"; - "info" => "please remove --progressive-balances" - ); - } - if let Some(max_workers) = clap_utils::parse_optional(cli_args, "beacon-processor-max-workers")? { client_config.beacon_processor.max_workers = max_workers; @@ -1487,6 +1447,20 @@ pub fn set_network_config( Some(Default::default()) } }; + + if let Some(idontwant_message_size_threshold) = + cli_args.get_one::("idontwant-message-size-threshold") + { + config.idontwant_message_size_threshold = idontwant_message_size_threshold + .parse::() + .map_err(|_| { + format!( + "Invalid idontwant message size threshold value passed: {}", + idontwant_message_size_threshold + ) + })?; + } + Ok(()) } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index cdb18b3b9c..aac1ee26e1 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -20,7 +20,7 @@ safe_arith = { workspace = true } state_processing = { workspace = true } slog = { workspace = true } serde = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } lru = { workspace = true } sloggers = { workspace = true } directory = { workspace = true } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 
991f215210..5483c490dc 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -44,7 +44,6 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; -use types::light_client_update::CurrentSyncCommitteeProofLen; use types::*; /// On-disk database that stores finalized states efficiently. @@ -641,15 +640,14 @@ impl, Cold: ItemStore> HotColdDB pub fn get_sync_committee_branch( &self, block_root: &Hash256, - ) -> Result>, Error> { + ) -> Result, Error> { let column = DBColumn::SyncCommitteeBranch; if let Some(bytes) = self .hot_db .get_bytes(column.into(), &block_root.as_ssz_bytes())? { - let sync_committee_branch: FixedVector = - FixedVector::from_ssz_bytes(&bytes)?; + let sync_committee_branch = Vec::::from_ssz_bytes(&bytes)?; return Ok(Some(sync_committee_branch)); } @@ -677,7 +675,7 @@ impl, Cold: ItemStore> HotColdDB pub fn store_sync_committee_branch( &self, block_root: Hash256, - sync_committee_branch: &FixedVector, + sync_committee_branch: &MerkleProof, ) -> Result<(), Error> { let column = DBColumn::SyncCommitteeBranch; self.hot_db.put_bytes( diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 902c440be8..1921b9b327 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *}; +pub use metrics::{set_gauge, try_create_int_gauge, *}; use directory::size_of_dir; use std::path::Path; diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 7fb0b2f4e7..c38ee58e3b 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -15,6 +15,7 @@ * [The `validator-manager` Command](./validator-manager.md) * [Creating validators](./validator-manager-create.md) * [Moving validators](./validator-manager-move.md) + * [Managing validators](./validator-manager-api.md) * [Slashing 
Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) * [Partial Withdrawals](./partial-withdrawal.md) @@ -54,13 +55,13 @@ * [Merge Migration](./merge-migration.md) * [Late Block Re-orgs](./late-block-re-orgs.md) * [Blobs](./advanced-blobs.md) -* [Built-In Documentation](./help_general.md) +* [Command Line Reference (CLI)](./help_general.md) * [Beacon Node](./help_bn.md) * [Validator Client](./help_vc.md) * [Validator Manager](./help_vm.md) * [Create](./help_vm_create.md) * [Import](./help_vm_import.md) - * [Move](./help_vm_move.md) + * [Move](./help_vm_move.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) * [FAQs](./faq.md) diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 6cb6685912..80eba7a059 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -230,7 +230,6 @@ Example Response Body "TERMINAL_TOTAL_DIFFICULTY": "10790000", "TERMINAL_BLOCK_HASH": "0x0000000000000000000000000000000000000000000000000000000000000000", "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH": "18446744073709551615", - "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY": "128", "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT": "16384", "MIN_GENESIS_TIME": "1614588812", "GENESIS_FORK_VERSION": "0x00001020", @@ -263,7 +262,6 @@ Example Response Body "HYSTERESIS_QUOTIENT": "4", "HYSTERESIS_DOWNWARD_MULTIPLIER": "1", "HYSTERESIS_UPWARD_MULTIPLIER": "5", - "SAFE_SLOTS_TO_UPDATE_JUSTIFIED": "8", "MIN_DEPOSIT_AMOUNT": "1000000000", "MAX_EFFECTIVE_BALANCE": "32000000000", "EFFECTIVE_BALANCE_INCREMENT": "1000000000", diff --git a/book/src/cli.md b/book/src/cli.md deleted file mode 100644 index f9e7df0748..0000000000 --- a/book/src/cli.md +++ /dev/null @@ -1,55 +0,0 @@ -# Command-Line Interface (CLI) - -The `lighthouse` binary provides all necessary Ethereum consensus client functionality. 
It -has two primary sub-commands: - -- `$ lighthouse beacon_node`: the largest and most fundamental component which connects to - the p2p network, processes messages and tracks the head of the beacon - chain. -- `$ lighthouse validator_client`: a lightweight but important component which loads a validators private - key and signs messages using a `beacon_node` as a source-of-truth. - -There are also some ancillary binaries like `lcli` and `account_manager`, but -these are primarily for testing. - -> **Note:** documentation sometimes uses `$ lighthouse bn` and `$ lighthouse -> vc` instead of the long-form `beacon_node` and `validator_client`. These -> commands are valid on the CLI too. - -## Installation - -Typical users may install `lighthouse` to `CARGO_HOME` with `cargo install ---path lighthouse` from the root of the repository. See ["Configuring the -`PATH` environment variable"](https://www.rust-lang.org/tools/install) for more -information. - -For developers, we recommend building Lighthouse using the `$ cargo build --release ---bin lighthouse` command and executing binaries from the -`/target/release` directory. This is more ergonomic when -modifying and rebuilding regularly. - -## Documentation - -Each binary supports the `--help` flag, this is the best source of -documentation. - -```bash -lighthouse beacon_node --help -``` - -```bash -lighthouse validator_client --help -``` - -## Creating a new database/testnet - -Lighthouse should run out-of-the box and connect to the current testnet -maintained by Sigma Prime. - -However, for developers, testnets can be created by following the instructions -outlined in [testnets](./testnets.md). The steps listed here will create a -local database specified to a new testnet. - -## Resuming from an existing database - -Once a database/testnet has been created, it can be resumed by running `$ lighthouse bn`. 
diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 733446e5d2..69701a3ad9 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -292,9 +292,6 @@ Options: which don't improve their payload after the first call, and high values are useful for ensuring the EL is given ample notice. Default: 1/3 of a slot. - --progressive-balances - Deprecated. This optimisation is now the default and cannot be - disabled. --proposer-reorg-cutoff Maximum delay after the start of the slot at which to propose a reorging block. Lower values can prevent failed reorgs by ensuring the @@ -329,14 +326,6 @@ Options: --quic-port6 The UDP port that quic will listen on over IPv6 if listening over both IPv4 and IPv6. Defaults to `port6` + 1 - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --self-limiter-protocols Enables the outbound rate limiter (requests made by this node).Rate limit quotas per protocol can be set in the form of @@ -390,27 +379,6 @@ Options: database. --target-peers The target number of peers. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. 
This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --trusted-peers One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system. @@ -445,8 +413,6 @@ Flags: incompatible with data availability checks. Checkpoint syncing is the preferred method for syncing a node. Only use this flag when testing. DO NOT use on mainnet! - --always-prefer-builder-payload - This flag is deprecated and has no effect. --always-prepare-payload Send payload attributes with every fork choice update. This is intended for use by block builders, relays and developers. You should @@ -470,8 +436,6 @@ Flags: Explicitly disables syncing of deposit logs from the execution node. This overrides any previous option that depends on it. Useful if you intend to run a non-validating beacon node. - --disable-duplicate-warn-logs - This flag is deprecated and has no effect. --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. This @@ -479,8 +443,6 @@ Flags: boot. --disable-inbound-rate-limiter Disables the inbound rate limiter (requests received by this node). - --disable-lock-timeouts - This flag is deprecated and has no effect. 
--disable-log-timestamp If present, do not include timestamps in logging output. --disable-malloc-tuning diff --git a/book/src/help_general.md b/book/src/help_general.md index 1c2d1266d0..996b048d10 100644 --- a/book/src/help_general.md +++ b/book/src/help_general.md @@ -1,4 +1,4 @@ -# Lighthouse General Commands +# Lighthouse CLI Reference ``` Ethereum 2.0 client by Sigma Prime. Provides a full-featured beacon node, a @@ -77,39 +77,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir

Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. -V, --version Print version @@ -122,9 +93,6 @@ Flags: debugging specific memory allocation issues. -h, --help Prints help information - -l - DEPRECATED Enables environment logging giving access to sub-protocol - logs such as discv5 and libp2p --log-color Force outputting colors when emitting logs to the terminal. --logfile-compress diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 23a8491993..2cfbfbc857 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -118,14 +118,6 @@ Options: specify nodes that are used to send beacon block proposals. 
A failure will revert back to the standard beacon nodes specified in --beacon-nodes. - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --secrets-dir The directory which contains the password to unlock the validator voting keypairs. Each password should be contained in a file where the @@ -140,27 +132,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). 
This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validator-registration-batch-size Defines the number of validators per validator/register_validator request sent to the BN. This value can be reduced to avoid timeouts @@ -210,12 +181,6 @@ Flags: If present, do not configure the system allocator. Providing this flag will generally increase memory usage, it should only be provided when debugging specific memory allocation issues. - --disable-run-on-all - DEPRECATED. Use --broadcast. By default, Lighthouse publishes - attestation, sync committee subscriptions and proposer preparation - messages to all beacon nodes provided in the `--beacon-nodes flag`. - This option changes that behaviour such that these api calls only go - out to the first available and synced beacon node --disable-slashing-protection-web3signer Disable Lighthouse's slashing protection for all web3signer keys. This can reduce the I/O burden on the VC but is only safe if slashing @@ -280,8 +245,6 @@ Flags: --prefer-builder-proposals If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. - --produce-block-v3 - This flag is deprecated and is no longer in use. --stdin-inputs If present, read all user inputs from stdin instead of tty. --unencrypted-http-transport diff --git a/book/src/help_vm.md b/book/src/help_vm.md index f787985b21..50c204f371 100644 --- a/book/src/help_vm.md +++ b/book/src/help_vm.md @@ -23,6 +23,11 @@ Commands: "create-validators" command. This command only supports validators signing via a keystore on the local file system (i.e., not Web3Signer validators). + list + Lists all validators in a validator client using the HTTP API. 
+ delete + Deletes one or more validators from a validator client using the HTTP + API. help Print this message or the help of the given subcommand(s) @@ -69,39 +74,10 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). 
This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-log-timestamp diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index cde822e894..2743117eae 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -91,14 +91,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --suggested-fee-recipient All created validators will use this value for the suggested fee recipient. Omit this flag to use the default value from the VC. @@ -106,27 +98,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. 
This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. Flags: --disable-deposits diff --git a/book/src/help_vm_import.md b/book/src/help_vm_import.md index 0883139ad2..68aab768ae 100644 --- a/book/src/help_vm_import.md +++ b/book/src/help_vm_import.md @@ -5,9 +5,17 @@ Uploads validators to a validator client using the HTTP API. The validators are defined in a JSON file which can be generated using the "create-validators" command. -Usage: lighthouse validator_manager import [OPTIONS] --validators-file +Usage: lighthouse validator_manager import [OPTIONS] Options: + --builder-boost-factor + When provided, the imported validator will use this percentage + multiplier to apply to the builder's payload value when choosing + between a builder payload header and payload from the local execution + node. + --builder-proposals + When provided, the imported validator will attempt to create blocks + via builder rather than the local EL. [possible values: true, false] -d, --datadir Used to specify a custom root data directory for lighthouse keys and databases. Defaults to $HOME/.lighthouse/{network} where network is @@ -17,6 +25,10 @@ Options: Specifies the verbosity level used when emitting logs to the terminal. 
[default: info] [possible values: info, debug, trace, warn, error, crit] + --gas-limit + When provided, the imported validator will use this gas limit. It is + recommended to leave this as the default value by not specifying this + flag. --genesis-state-url A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server URLs can generally be used with @@ -26,6 +38,10 @@ Options: --genesis-state-url-timeout The timeout in seconds for the request to --genesis-state-url. [default: 180] + --keystore-file + The path to a keystore JSON file to be imported to the validator + client. This file is usually created using staking-deposit-cli or + ethstaker-deposit-cli --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] @@ -50,49 +66,27 @@ Options: --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. + --password + Password of the keystore file. + --prefer-builder-proposals + When provided, the imported validator will always prefer blocks + constructed by builders, regardless of payload value. [possible + values: true, false] + --suggested-fee-recipient + When provided, the imported validator will use the suggested fee + recipient. Omit this flag to use the default value from the VC. -t, --testnet-dir Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. 
- --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. - --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validators-file The path to a JSON file containing a list of validators to be imported to the validator client. This file is usually named "validators.json". --vc-token The file containing a token required by the validator client. --vc-url - A HTTP(S) address of a validator client using the keymanager-API. If - this value is not supplied then a 'dry run' will be conducted where no - changes are made to the validator client. [default: - http://localhost:5062] + A HTTP(S) address of a validator client using the keymanager-API. 
+ [default: http://localhost:5062] Flags: --disable-log-timestamp diff --git a/book/src/help_vm_move.md b/book/src/help_vm_move.md index 12dd1e9140..99eee32c78 100644 --- a/book/src/help_vm_move.md +++ b/book/src/help_vm_move.md @@ -74,14 +74,6 @@ Options: If this flag is set, Lighthouse will always prefer blocks constructed by builders, regardless of payload value. [possible values: true, false] - --safe-slots-to-import-optimistically - Used to coordinate manual overrides of the - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override this parameter in the event of an - attack at the PoS transition block. Incorrect use of this flag can - cause your node to possibly accept an invalid chain or sync more - slowly. Be extremely careful with this flag. --src-vc-token The file containing a token required by the source validator client. --src-vc-url @@ -95,27 +87,6 @@ Options: Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective if there is no existing database. - --terminal-block-hash-epoch-override - Used to coordinate manual overrides to the - TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH parameter. This flag should only - be used if the user has a clear understanding that the broad Ethereum - community has elected to override the terminal PoW block. Incorrect - use of this flag will cause your node to experience a consensus - failure. Be extremely careful with this flag. - --terminal-block-hash-override - Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH - parameter. This flag should only be used if the user has a clear - understanding that the broad Ethereum community has elected to - override the terminal PoW block. Incorrect use of this flag will cause - your node to experience a consensus failure. Be extremely careful with - this flag. 
- --terminal-total-difficulty-override - Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY - parameter. Accepts a 256-bit decimal integer (not a hex value). This - flag should only be used if the user has a clear understanding that - the broad Ethereum community has elected to override the terminal - difficulty. Incorrect use of this flag will cause your node to - experience a consensus failure. Be extremely careful with this flag. --validators The validators to be moved. Either a list of 0x-prefixed validator pubkeys or the keyword "all". diff --git a/book/src/redundancy.md b/book/src/redundancy.md index ee685a17cf..daf0eb4a5b 100644 --- a/book/src/redundancy.md +++ b/book/src/redundancy.md @@ -74,8 +74,7 @@ lighthouse bn \ Prior to v3.2.0 fallback beacon nodes also required the `--subscribe-all-subnets` and `--import-all-attestations` flags. These flags are no longer required as the validator client will now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour -can be disabled using the `--broadcast none` flag for `lighthouse vc` (or `--disable-run-on-all` -[deprecated]). +can be disabled using the `--broadcast none` flag for `lighthouse vc`. ### Broadcast modes diff --git a/book/src/validator-manager-api.md b/book/src/validator-manager-api.md new file mode 100644 index 0000000000..a5fc69fd5a --- /dev/null +++ b/book/src/validator-manager-api.md @@ -0,0 +1,39 @@ +# Managing Validators + +The `lighthouse validator-manager` uses the [Keymanager API](https://ethereum.github.io/keymanager-APIs/#/) to list, import and delete keystores via the HTTP API. This requires the validator client running with the flag `--http`. + +## Delete + +The `delete` command deletes one or more validators from the validator client. It will also modify the `validator_definitions.yml` file automatically so there is no manual action required from the user after the delete. 
To `delete`: + +```bash +lighthouse vm delete --vc-token --validators pubkey1,pubkey2 +``` + +Example: + +```bash +lighthouse vm delete --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --validators 0x8885c29b8f88ee9b9a37b480fd4384fed74bda33d85bc8171a904847e65688b6c9bb4362d6597fd30109fb2def6c3ae4,0xa262dae3dcd2b2e280af534effa16bedb27c06f2959e114d53bd2a248ca324a018dc73179899a066149471a94a1bc92f +``` + +## Import + +The `import` command imports validator keystores generated by the staking-deposit-cli/ethstaker-deposit-cli. To import a validator keystore: + +```bash +lighthouse vm import --vc-token --keystore-file /path/to/json --password keystore_password +``` + +Example: + +``` +lighthouse vm import --vc-token ~/.lighthouse/mainnet/validators/api-token.txt --keystore-file keystore.json --password keystore_password +``` + +## List + +To list the validators running on the validator client: + +```bash +lighthouse vm list --vc-token ~/.lighthouse/mainnet/validators/api-token.txt +``` diff --git a/book/src/validator-manager-create.md b/book/src/validator-manager-create.md index d97f953fc1..b4c86dc6da 100644 --- a/book/src/validator-manager-create.md +++ b/book/src/validator-manager-create.md @@ -69,6 +69,8 @@ lighthouse \ > Be sure to remove `./validators.json` after the import is successful since it > contains unencrypted validator keystores. +> Note: To import validators with validator-manager using keystore files created using the staking deposit CLI, refer to [Managing Validators](./validator-manager-api.md#import). + ## Detailed Guide This guide will create two validators and import them to a VC. 
For simplicity, diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 2c8bbbf4b4..c1fa621abb 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -9,13 +9,16 @@ use eth2_wallet::{ use filesystem::{create_with_600_perms, Error as FsError}; use rand::{distributions::Alphanumeric, Rng}; use serde::{Deserialize, Serialize}; -use std::fs::{self, File}; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::str::from_utf8; use std::thread::sleep; use std::time::Duration; +use std::{ + fs::{self, File}, + str::FromStr, +}; use zeroize::Zeroize; pub mod validator_definitions; @@ -215,6 +218,14 @@ pub fn mnemonic_from_phrase(phrase: &str) -> Result { #[serde(transparent)] pub struct ZeroizeString(String); +impl FromStr for ZeroizeString { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(Self(s.to_owned())) + } +} + impl From for ZeroizeString { fn from(s: String) -> Self { Self(s) diff --git a/common/clap_utils/src/lib.rs b/common/clap_utils/src/lib.rs index cba7399c9b..a4b5f4dc1c 100644 --- a/common/clap_utils/src/lib.rs +++ b/common/clap_utils/src/lib.rs @@ -1,6 +1,5 @@ //! A helper library for parsing values from `clap::ArgMatches`. -use alloy_primitives::U256 as Uint256; use clap::builder::styling::*; use clap::ArgMatches; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK}; @@ -30,38 +29,9 @@ pub fn get_eth2_network_config(cli_args: &ArgMatches) -> Result(cli_args, "terminal-total-difficulty-override")? - { - let stripped = string.replace(',', ""); - let terminal_total_difficulty = Uint256::from_str(&stripped).map_err(|e| { - format!( - "Could not parse --terminal-total-difficulty-override as decimal value: {:?}", - e - ) - })?; - - eth2_network_config.config.terminal_total_difficulty = terminal_total_difficulty; - } - - if let Some(hash) = parse_optional(cli_args, "terminal-block-hash-override")? 
{ - eth2_network_config.config.terminal_block_hash = hash; - } - - if let Some(epoch) = parse_optional(cli_args, "terminal-block-hash-epoch-override")? { - eth2_network_config - .config - .terminal_block_hash_activation_epoch = epoch; - } - - if let Some(slots) = parse_optional(cli_args, "safe-slots-to-import-optimistically")? { - eth2_network_config - .config - .safe_slots_to_import_optimistically = slots; - } - Ok(eth2_network_config) } diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml deleted file mode 100644 index fe966f4a9c..0000000000 --- a/common/lighthouse_metrics/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "lighthouse_metrics" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -prometheus = "0.13.0" diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index cac6d073f2..73cbdf44d4 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -9,7 +9,7 @@ test_logger = [] # Print log output to stderr when running tests instead of drop [dependencies] chrono = { version = "0.4", default-features = false, features = ["clock", "std"] } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 0df03c17d0..4bb3739298 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,6 +1,4 @@ -use lighthouse_metrics::{ - inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult, -}; +use metrics::{inc_counter, try_create_int_counter, IntCounter, Result as MetricsResult}; use slog::Logger; use slog_term::Decorator; use std::io::{Result, Write}; diff --git a/common/logging/src/tracing_metrics_layer.rs 
b/common/logging/src/tracing_metrics_layer.rs index 89a1f4d1f1..5d272adbf5 100644 --- a/common/logging/src/tracing_metrics_layer.rs +++ b/common/logging/src/tracing_metrics_layer.rs @@ -1,6 +1,5 @@ //! Exposes [`MetricsLayer`]: A tracing layer that registers metrics of logging events. -use lighthouse_metrics as metrics; use std::sync::LazyLock; use tracing_log::NormalizeEvent; diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index b91e68c518..79a07eed16 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } libc = "0.2.79" parking_lot = { workspace = true } tikv-jemalloc-ctl = { version = "0.6.0", optional = true, features = ["stats"] } diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 41d8d28291..30313d0672 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -4,7 +4,7 @@ //! https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html //! //! These functions are generally only suitable for Linux systems. -use lighthouse_metrics::*; +use metrics::*; use parking_lot::Mutex; use std::env; use std::os::raw::c_int; @@ -38,60 +38,57 @@ pub static GLOBAL_LOCK: LazyLock> = LazyLock::new(|| <_>::default()); // Metrics for the malloc. For more information, see: // // https://man7.org/linux/man-pages/man3/mallinfo.3.html -pub static MALLINFO_ARENA: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_ARENA: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_arena", "The total amount of memory allocated by means other than mmap(2). 
\ This figure includes both in-use blocks and blocks on the free list.", ) }); -pub static MALLINFO_ORDBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_ORDBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_ordblks", "The number of ordinary (i.e., non-fastbin) free blocks.", ) }); -pub static MALLINFO_SMBLKS: LazyLock> = +pub static MALLINFO_SMBLKS: LazyLock> = LazyLock::new(|| try_create_int_gauge("mallinfo_smblks", "The number of fastbin free blocks.")); -pub static MALLINFO_HBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_HBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_hblks", "The number of blocks currently allocated using mmap.", ) }); -pub static MALLINFO_HBLKHD: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_HBLKHD: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_hblkhd", "The number of bytes in blocks currently allocated using mmap.", ) }); -pub static MALLINFO_FSMBLKS: LazyLock> = LazyLock::new(|| { +pub static MALLINFO_FSMBLKS: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "mallinfo_fsmblks", "The total number of bytes in fastbin free blocks.", ) }); -pub static MALLINFO_UORDBLKS: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_uordblks", - "The total number of bytes used by in-use allocations.", - ) - }); -pub static MALLINFO_FORDBLKS: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_fordblks", - "The total number of bytes in free blocks.", - ) - }); -pub static MALLINFO_KEEPCOST: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "mallinfo_keepcost", - "The total amount of releasable free space at the top of the heap..", - ) - }); +pub static MALLINFO_UORDBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_uordblks", + "The total number of bytes used by in-use allocations.", + ) +}); +pub static MALLINFO_FORDBLKS: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_fordblks", + "The 
total number of bytes in free blocks.", + ) +}); +pub static MALLINFO_KEEPCOST: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "mallinfo_keepcost", + "The total amount of releasable free space at the top of the heap..", + ) +}); /// Calls `mallinfo` and updates Prometheus metrics with the results. pub fn scrape_mallinfo_metrics() { diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index a392a74e8f..0e2e00cb0e 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -7,7 +7,7 @@ //! //! A) `JEMALLOC_SYS_WITH_MALLOC_CONF` at compile-time. //! B) `_RJEM_MALLOC_CONF` at runtime. -use lighthouse_metrics::{set_gauge, try_create_int_gauge, IntGauge}; +use metrics::{set_gauge, try_create_int_gauge, IntGauge}; use std::sync::LazyLock; use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; @@ -15,22 +15,22 @@ use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; // Metrics for jemalloc. 
-pub static NUM_ARENAS: LazyLock> = +pub static NUM_ARENAS: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_num_arenas", "The number of arenas in use")); -pub static BYTES_ALLOCATED: LazyLock> = LazyLock::new(|| { +pub static BYTES_ALLOCATED: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_allocated", "Equivalent to stats.allocated") }); -pub static BYTES_ACTIVE: LazyLock> = +pub static BYTES_ACTIVE: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_active", "Equivalent to stats.active")); -pub static BYTES_MAPPED: LazyLock> = +pub static BYTES_MAPPED: LazyLock> = LazyLock::new(|| try_create_int_gauge("jemalloc_bytes_mapped", "Equivalent to stats.mapped")); -pub static BYTES_METADATA: LazyLock> = LazyLock::new(|| { +pub static BYTES_METADATA: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_metadata", "Equivalent to stats.metadata") }); -pub static BYTES_RESIDENT: LazyLock> = LazyLock::new(|| { +pub static BYTES_RESIDENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_resident", "Equivalent to stats.resident") }); -pub static BYTES_RETAINED: LazyLock> = LazyLock::new(|| { +pub static BYTES_RETAINED: LazyLock> = LazyLock::new(|| { try_create_int_gauge("jemalloc_bytes_retained", "Equivalent to stats.retained") }); diff --git a/common/metrics/Cargo.toml b/common/metrics/Cargo.toml new file mode 100644 index 0000000000..a7f4f4b967 --- /dev/null +++ b/common/metrics/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "metrics" +version = "0.2.0" +edition = { workspace = true } + +[dependencies] +prometheus = { workspace = true } diff --git a/common/lighthouse_metrics/src/lib.rs b/common/metrics/src/lib.rs similarity index 99% rename from common/lighthouse_metrics/src/lib.rs rename to common/metrics/src/lib.rs index 2a1e99defa..1f2ac71aea 100644 --- a/common/lighthouse_metrics/src/lib.rs +++ b/common/metrics/src/lib.rs @@ -20,10 +20,10 @@ //! ## Example //! //! ```rust -//! 
use lighthouse_metrics::*; +//! use metrics::*; //! use std::sync::LazyLock; //! -//! // These metrics are "magically" linked to the global registry defined in `lighthouse_metrics`. +//! // These metrics are "magically" linked to the global registry defined in `metrics`. //! pub static RUN_COUNT: LazyLock> = LazyLock::new(|| try_create_int_counter( //! "runs_total", //! "Total number of runs" diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 55f18edd52..2da32c307e 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -14,7 +14,7 @@ eth2 = { workspace = true } serde_json = { workspace = true } serde = { workspace = true } lighthouse_version = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } slog = { workspace = true } store = { workspace = true } regex = { workspace = true } diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index e157d82c11..2f6c820f56 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -1,5 +1,5 @@ use super::types::{BeaconProcessMetrics, ValidatorProcessMetrics}; -use lighthouse_metrics::{MetricFamily, MetricType}; +use metrics::{MetricFamily, MetricType}; use serde_json::json; use std::collections::HashMap; use std::path::Path; @@ -155,7 +155,7 @@ fn get_value(mf: &MetricFamily) -> Option { /// Collects all metrics and returns a `serde_json::Value` object with the required metrics /// from the metrics hashmap. 
pub fn gather_metrics(metrics_map: &HashMap) -> Option { - let metric_families = lighthouse_metrics::gather(); + let metric_families = metrics::gather(); let mut res = serde_json::Map::with_capacity(metrics_map.len()); for mf in metric_families.iter() { let metric_name = mf.get_name(); diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index 13bcf006a9..c2f330cd50 100644 --- a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -6,5 +6,5 @@ edition = { workspace = true } [dependencies] types = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } diff --git a/common/slot_clock/src/metrics.rs b/common/slot_clock/src/metrics.rs index 24023c9ed7..ec95e90d4a 100644 --- a/common/slot_clock/src/metrics.rs +++ b/common/slot_clock/src/metrics.rs @@ -1,5 +1,5 @@ use crate::SlotClock; -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; use types::{EthSpec, Slot}; diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 7928d4a3c9..26bcd7b339 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -4,11 +4,17 @@ version = "0.1.0" authors = ["Sigma Prime "] edition = { workspace = true } +[features] +default = ["slog"] +slog = ["dep:slog", "dep:sloggers", "dep:logging"] +tracing = ["dep:tracing"] + [dependencies] async-channel = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } +slog = { workspace = true, optional = true } futures = { workspace = true } -lighthouse_metrics = { workspace = true } -sloggers = { workspace = true } -logging = { workspace = true } +metrics = { workspace = true } +sloggers = { workspace = true, optional = true } +logging = { workspace = true, optional = true } +tracing = { workspace = true, optional = true } diff --git a/common/task_executor/src/lib.rs 
b/common/task_executor/src/lib.rs index d6edfd3121..92ddb7c0be 100644 --- a/common/task_executor/src/lib.rs +++ b/common/task_executor/src/lib.rs @@ -1,14 +1,20 @@ mod metrics; +#[cfg(not(feature = "tracing"))] pub mod test_utils; use futures::channel::mpsc::Sender; use futures::prelude::*; -use slog::{debug, o, trace}; use std::sync::Weak; use tokio::runtime::{Handle, Runtime}; pub use tokio::task::JoinHandle; +// Set up logging framework +#[cfg(not(feature = "tracing"))] +use slog::{debug, o}; +#[cfg(feature = "tracing")] +use tracing::debug; + /// Provides a reason when Lighthouse is shut down. #[derive(Copy, Clone, Debug, PartialEq)] pub enum ShutdownReason { @@ -79,7 +85,7 @@ pub struct TaskExecutor { /// /// The task must provide a reason for shutting down. signal_tx: Sender, - + #[cfg(not(feature = "tracing"))] log: slog::Logger, } @@ -94,18 +100,20 @@ impl TaskExecutor { pub fn new>( handle: T, exit: async_channel::Receiver<()>, - log: slog::Logger, + #[cfg(not(feature = "tracing"))] log: slog::Logger, signal_tx: Sender, ) -> Self { Self { handle_provider: handle.into(), exit, signal_tx, + #[cfg(not(feature = "tracing"))] log, } } /// Clones the task executor adding a service name. + #[cfg(not(feature = "tracing"))] pub fn clone_with_name(&self, service_name: String) -> Self { TaskExecutor { handle_provider: self.handle_provider.clone(), @@ -115,6 +123,16 @@ impl TaskExecutor { } } + /// Clones the task executor adding a service name. + #[cfg(feature = "tracing")] + pub fn clone(&self) -> Self { + TaskExecutor { + handle_provider: self.handle_provider.clone(), + exit: self.exit.clone(), + signal_tx: self.signal_tx.clone(), + } + } + /// A convenience wrapper for `Self::spawn` which ignores a `Result` as long as both `Ok`/`Err` /// are of type `()`. /// @@ -150,10 +168,13 @@ impl TaskExecutor { drop(timer); }); } else { + #[cfg(not(feature = "tracing"))] debug!( self.log, "Couldn't spawn monitor task. 
Runtime shutting down" - ) + ); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn monitor task. Runtime shutting down"); } } @@ -175,7 +196,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime. This function does not wrap the task in an `async-channel::Receiver` /// like [spawn](#method.spawn). /// The caller of this function is responsible for wrapping up the task with an `async-channel::Receiver` to - /// ensure that the task gets canceled appropriately. + /// ensure that the task gets cancelled appropriately. /// This function generates prometheus metrics on number of tasks and task duration. /// /// This is useful in cases where the future to be spawned needs to do additional cleanup work when @@ -197,7 +218,10 @@ impl TaskExecutor { if let Some(handle) = self.handle() { handle.spawn(future); } else { + #[cfg(not(feature = "tracing"))] debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. Runtime shutting down"); } } } @@ -215,7 +239,7 @@ impl TaskExecutor { /// Spawn a future on the tokio runtime wrapped in an `async-channel::Receiver` returning an optional /// join handle to the future. - /// The task is canceled when the corresponding async-channel is dropped. + /// The task is cancelled when the corresponding async-channel is dropped. /// /// This function generates prometheus metrics on number of tasks and task duration. 
pub fn spawn_handle( @@ -224,6 +248,8 @@ impl TaskExecutor { name: &'static str, ) -> Option>> { let exit = self.exit(); + + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { @@ -234,12 +260,12 @@ impl TaskExecutor { Some(handle.spawn(async move { futures::pin_mut!(exit); let result = match future::select(Box::pin(task), exit).await { - future::Either::Left((value, _)) => { - trace!(log, "Async task completed"; "task" => name); - Some(value) - } + future::Either::Left((value, _)) => Some(value), future::Either::Right(_) => { + #[cfg(not(feature = "tracing"))] debug!(log, "Async task shutdown, exit received"; "task" => name); + #[cfg(feature = "tracing")] + debug!(task = name, "Async task shutdown, exit received"); None } }; @@ -247,7 +273,10 @@ impl TaskExecutor { result })) } else { - debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(not(feature = "tracing"))] + debug!(log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. Runtime shutting down"); None } } else { @@ -270,6 +299,7 @@ impl TaskExecutor { F: FnOnce() -> R + Send + 'static, R: Send + 'static, { + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); let timer = metrics::start_timer_vec(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]); @@ -278,19 +308,22 @@ impl TaskExecutor { let join_handle = if let Some(handle) = self.handle() { handle.spawn_blocking(task) } else { + #[cfg(not(feature = "tracing"))] debug!(self.log, "Couldn't spawn task. Runtime shutting down"); + #[cfg(feature = "tracing")] + debug!("Couldn't spawn task. 
Runtime shutting down"); return None; }; let future = async move { let result = match join_handle.await { - Ok(result) => { - trace!(log, "Blocking task completed"; "task" => name); - Ok(result) - } - Err(e) => { - debug!(log, "Blocking task ended unexpectedly"; "error" => %e); - Err(e) + Ok(result) => Ok(result), + Err(error) => { + #[cfg(not(feature = "tracing"))] + debug!(log, "Blocking task ended unexpectedly"; "error" => %error); + #[cfg(feature = "tracing")] + debug!(%error, "Blocking task ended unexpectedly"); + Err(error) } }; drop(timer); @@ -321,32 +354,48 @@ impl TaskExecutor { ) -> Option { let timer = metrics::start_timer_vec(&metrics::BLOCK_ON_TASKS_HISTOGRAM, &[name]); metrics::inc_gauge_vec(&metrics::BLOCK_ON_TASKS_COUNT, &[name]); + #[cfg(not(feature = "tracing"))] let log = self.log.clone(); let handle = self.handle()?; let exit = self.exit(); - + #[cfg(not(feature = "tracing"))] debug!( log, "Starting block_on task"; "name" => name ); + #[cfg(feature = "tracing")] + debug!(name, "Starting block_on task"); + handle.block_on(async { let output = tokio::select! { output = future => { + #[cfg(not(feature = "tracing"))] debug!( log, "Completed block_on task"; "name" => name ); + #[cfg(feature = "tracing")] + debug!( + name, + "Completed block_on task" + ); Some(output) }, _ = exit => { + #[cfg(not(feature = "tracing"))] debug!( log, "Cancelled block_on task"; "name" => name, ); + #[cfg(feature = "tracing")] + debug!( + name, + "Cancelled block_on task" + ); None } }; @@ -376,6 +425,7 @@ impl TaskExecutor { } /// Returns a reference to the logger. 
+ #[cfg(not(feature = "tracing"))] pub fn log(&self) -> &slog::Logger { &self.log } diff --git a/common/task_executor/src/metrics.rs b/common/task_executor/src/metrics.rs index a40bfdf4e7..bd4d6a50b9 100644 --- a/common/task_executor/src/metrics.rs +++ b/common/task_executor/src/metrics.rs @@ -1,5 +1,5 @@ /// Handles async task metrics -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static ASYNC_TASKS_COUNT: LazyLock> = LazyLock::new(|| { diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 84f5ce5f18..a9407c392d 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -17,6 +17,6 @@ serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } headers = "0.3.2" -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } serde_array_query = "0.1.0" bytes = { workspace = true } diff --git a/common/warp_utils/src/metrics.rs b/common/warp_utils/src/metrics.rs index 505d277583..fabcf93650 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/warp_utils/src/metrics.rs @@ -1,5 +1,5 @@ use eth2::lighthouse::{ProcessHealth, SystemHealth}; -use lighthouse_metrics::*; +use metrics::*; use std::sync::LazyLock; pub static PROCESS_NUM_THREADS: LazyLock> = LazyLock::new(|| { diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 4a4f6e9086..b32e0aa665 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -12,7 +12,7 @@ state_processing = { workspace = true } proto_array = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } slog = { workspace = true } [dev-dependencies] diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index ca59a6adfb..85704042df 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ 
b/consensus/fork_choice/src/fork_choice.rs @@ -1300,43 +1300,6 @@ where } } - /// Returns `Ok(false)` if a block is not viable to be imported optimistically. - /// - /// ## Notes - /// - /// Equivalent to the function with the same name in the optimistic sync specs: - /// - /// https://github.com/ethereum/consensus-specs/blob/dev/sync/optimistic.md#helpers - pub fn is_optimistic_candidate_block( - &self, - current_slot: Slot, - block_slot: Slot, - block_parent_root: &Hash256, - spec: &ChainSpec, - ) -> Result> { - // If the block is sufficiently old, import it. - if block_slot + spec.safe_slots_to_import_optimistically <= current_slot { - return Ok(true); - } - - // If the parent block has execution enabled, always import the block. - // - // See: - // - // https://github.com/ethereum/consensus-specs/pull/2844 - if self - .proto_array - .get_block(block_parent_root) - .map_or(false, |parent| { - parent.execution_status.is_execution_enabled() - }) - { - return Ok(true); - } - - Ok(false) - } - /// Return the current finalized checkpoint. pub fn finalized_checkpoint(&self) -> Checkpoint { *self.fc_store.finalized_checkpoint() diff --git a/consensus/fork_choice/src/metrics.rs b/consensus/fork_choice/src/metrics.rs index eb0dbf435e..b5cda2f587 100644 --- a/consensus/fork_choice/src/metrics.rs +++ b/consensus/fork_choice/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; use types::EthSpec; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ce19d68203..29265e34e4 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -256,36 +256,6 @@ impl ForkChoiceTest { self } - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - /// - /// If the chain is presently in an unsafe period, transition through it and the following safe - /// period. 
- /// - /// Note: the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` variable has been removed - /// from the fork choice spec in Q1 2023. We're still leaving references to - /// it in our tests because (a) it's easier and (b) it allows us to easily - /// test for the absence of that parameter. - pub fn move_to_next_unsafe_period(self) -> Self { - self.move_inside_safe_to_update() - .move_outside_safe_to_update() - } - - /// Moves to the next slot that is *outside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - pub fn move_outside_safe_to_update(self) -> Self { - while is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - - /// Moves to the next slot that is *inside* the `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` range. - pub fn move_inside_safe_to_update(self) -> Self { - while !is_safe_to_update(self.harness.chain.slot().unwrap(), &self.harness.chain.spec) { - self.harness.advance_slot() - } - self - } - /// Applies a block directly to fork choice, bypassing the beacon chain. /// /// Asserts the block was applied successfully. @@ -516,10 +486,6 @@ impl ForkChoiceTest { } } -fn is_safe_to_update(slot: Slot, spec: &ChainSpec) -> bool { - slot % E::slots_per_epoch() < spec.safe_slots_to_update_justified -} - #[test] fn justified_and_finalized_blocks() { let tester = ForkChoiceTest::new(); @@ -536,15 +502,13 @@ fn justified_and_finalized_blocks() { assert!(fork_choice.get_finalized_block().is_ok()); } -/// - The new justified checkpoint descends from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` +/// - The new justified checkpoint descends from the current. Near genesis. 
#[tokio::test] -async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent_first_justification() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() - .move_inside_safe_to_update() .assert_justified_epoch(0) .apply_blocks(1) .await @@ -552,49 +516,29 @@ async fn justified_checkpoint_updates_with_descendent_inside_safe_slots() { } /// - The new justified checkpoint descends from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - This is **not** the first justification since genesis #[tokio::test] -async fn justified_checkpoint_updates_with_descendent_outside_safe_slots() { +async fn justified_checkpoint_updates_with_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch <= 2) .await .unwrap() - .move_outside_safe_to_update() .assert_justified_epoch(2) .apply_blocks(1) .await .assert_justified_epoch(3); } -/// - The new justified checkpoint descends from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - This is the first justification since genesis -#[tokio::test] -async fn justified_checkpoint_updates_first_justification_outside_safe_to_update() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .move_to_next_unsafe_period() - .assert_justified_epoch(0) - .apply_blocks(1) - .await - .assert_justified_epoch(2); -} - /// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` /// - Finalized epoch has **not** increased. 
#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_without_finality() { +async fn justified_checkpoint_updates_with_non_descendent() { ForkChoiceTest::new() .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) .await .unwrap() .apply_blocks(1) .await - .move_inside_safe_to_update() .assert_justified_epoch(2) .apply_block_directly_to_fork_choice(|_, state| { // The finalized checkpoint should not change. @@ -611,64 +555,6 @@ async fn justified_checkpoint_updates_with_non_descendent_inside_safe_slots_with .assert_justified_epoch(3); } -/// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED`. -/// - Finalized epoch has **not** increased. -#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_without_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_to_next_unsafe_period() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should not change. - state.finalized_checkpoint().epoch = Epoch::new(0); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint_mut().root = *state - .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) - .unwrap(); - }) - .await - // Now that `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` has been removed, the new - // block should have updated the justified checkpoint. - .assert_justified_epoch(3); -} - -/// - The new justified checkpoint **does not** descend from the current. -/// - Current slot is **not** within `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` -/// - Finalized epoch has increased. 
-#[tokio::test] -async fn justified_checkpoint_updates_with_non_descendent_outside_safe_slots_with_finality() { - ForkChoiceTest::new() - .apply_blocks_while(|_, state| state.current_justified_checkpoint().epoch == 0) - .await - .unwrap() - .apply_blocks(1) - .await - .move_to_next_unsafe_period() - .assert_justified_epoch(2) - .apply_block_directly_to_fork_choice(|_, state| { - // The finalized checkpoint should change. - state.finalized_checkpoint_mut().epoch = Epoch::new(1); - - // The justified checkpoint has changed. - state.current_justified_checkpoint_mut().epoch = Epoch::new(3); - // The new block should **not** include the current justified block as an ancestor. - state.current_justified_checkpoint_mut().root = *state - .get_block_root(Epoch::new(1).start_slot(E::slots_per_epoch())) - .unwrap(); - }) - .await - .assert_justified_epoch(3); -} - /// Check that the balances are obtained correctly. #[tokio::test] async fn justified_balances() { diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index 7b7c6eb0c4..b7f6ef7b2a 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -25,7 +25,7 @@ ethereum_hashing = { workspace = true } int_to_bytes = { workspace = true } smallvec = { workspace = true } arbitrary = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } derivative = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } rand = { workspace = true } diff --git a/consensus/state_processing/src/common/update_progressive_balances_cache.rs b/consensus/state_processing/src/common/update_progressive_balances_cache.rs index af843b3acb..101e861683 100644 --- a/consensus/state_processing/src/common/update_progressive_balances_cache.rs +++ b/consensus/state_processing/src/common/update_progressive_balances_cache.rs @@ -4,7 +4,7 @@ use crate::metrics::{ 
PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL, }; use crate::{BlockProcessingError, EpochProcessingError}; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use types::{ is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EpochTotalBalances, EthSpec, ParticipationFlags, ProgressiveBalancesCache, Validator, diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index e6fe483776..b53dee96d9 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; /* diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 81998906cf..5d862899bc 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -470,59 +470,7 @@ pub fn apply_deposit( return Ok(()); } - // [Modified in Electra:EIP7251] - if state.fork_name_unchecked() >= ForkName::Electra { - let amount = 0; - // Create a new validator. 
- let mut validator = Validator { - pubkey: deposit_data.pubkey, - withdrawal_credentials: deposit_data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: 0, - slashed: false, - }; - let max_effective_balance = - validator.get_max_effective_balance(spec, state.fork_name_unchecked()); - validator.effective_balance = std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - max_effective_balance, - ); - - state.validators_mut().push(validator)?; - // state balances is set to 0 in electra - state.balances_mut().push(0)?; - } else { - let validator = Validator { - pubkey: deposit_data.pubkey, - withdrawal_credentials: deposit_data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ), - slashed: false, - }; - state.validators_mut().push(validator)?; - // state balances is set to 0 in electra - state.balances_mut().push(amount)?; - }; - - // Altair or later initializations. 
- if let Ok(previous_epoch_participation) = state.previous_epoch_participation_mut() { - previous_epoch_participation.push(ParticipationFlags::default())?; - } - if let Ok(current_epoch_participation) = state.current_epoch_participation_mut() { - current_epoch_participation.push(ParticipationFlags::default())?; - } - if let Ok(inactivity_scores) = state.inactivity_scores_mut() { - inactivity_scores.push(0)?; - } + state.add_validator_to_registry(&deposit_data, spec)?; // [New in Electra:EIP7251] if let Ok(pending_deposits) = state.pending_deposits_mut() { diff --git a/consensus/types/presets/gnosis/phase0.yaml b/consensus/types/presets/gnosis/phase0.yaml index 87c73e6fb7..48129cb47e 100644 --- a/consensus/types/presets/gnosis/phase0.yaml +++ b/consensus/types/presets/gnosis/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/presets/mainnet/phase0.yaml b/consensus/types/presets/mainnet/phase0.yaml index 89bb97d6a8..02bc96c8cd 100644 --- a/consensus/types/presets/mainnet/phase0.yaml +++ b/consensus/types/presets/mainnet/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/presets/minimal/phase0.yaml b/consensus/types/presets/minimal/phase0.yaml index c9c81325f1..1f75603142 100644 --- a/consensus/types/presets/minimal/phase0.yaml +++ b/consensus/types/presets/minimal/phase0.yaml @@ -18,12 +18,6 @@ 
HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**1 (= 1) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 38bbf0feee..99f5ea7756 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1548,6 +1548,35 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + pub fn add_validator_to_registry( + &mut self, + deposit_data: &DepositData, + spec: &ChainSpec, + ) -> Result<(), Error> { + let fork = self.fork_name_unchecked(); + let amount = if fork.electra_enabled() { + 0 + } else { + deposit_data.amount + }; + self.validators_mut() + .push(Validator::from_deposit(deposit_data, amount, fork, spec))?; + self.balances_mut().push(amount)?; + + // Altair or later initializations. + if let Ok(previous_epoch_participation) = self.previous_epoch_participation_mut() { + previous_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(current_epoch_participation) = self.current_epoch_participation_mut() { + current_epoch_participation.push(ParticipationFlags::default())?; + } + if let Ok(inactivity_scores) = self.inactivity_scores_mut() { + inactivity_scores.push(0)?; + } + + Ok(()) + } + /// Safe copy-on-write accessor for the `validators` list. pub fn get_validator_cow( &mut self, @@ -2430,33 +2459,64 @@ impl BeaconState { Ok(()) } - pub fn compute_merkle_proof(&self, generalized_index: usize) -> Result, Error> { - // 1. Convert generalized index to field index. - let field_index = match generalized_index { + pub fn compute_current_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. 
Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { light_client_update::CURRENT_SYNC_COMMITTEE_INDEX - | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { - // Sync committees are top-level fields, subtract off the generalized indices - // for the internal nodes. Result should be 22 or 23, the field offset of the committee - // in the `BeaconState`: - // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate - generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - light_client_update::FINALIZED_ROOT_INDEX => { - // Finalized root is the right child of `finalized_checkpoint`, divide by two to get - // the generalized index of `state.finalized_checkpoint`. - let finalized_checkpoint_generalized_index = generalized_index / 2; - // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches - // position of `finalized_checkpoint` in `BeaconState`. - finalized_checkpoint_generalized_index - .checked_sub(self.num_fields_pow2()) - .ok_or(Error::IndexNotSupported(generalized_index))? - } - _ => return Err(Error::IndexNotSupported(generalized_index)), }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } - // 2. Get all `BeaconState` leaves. + pub fn compute_next_sync_committee_proof(&self) -> Result, Error> { + // Sync committees are top-level fields, subtract off the generalized indices + // for the internal nodes. 
Result should be 22 or 23, the field offset of the committee + // in the `BeaconState`: + // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate + let field_index = if self.fork_name_unchecked().electra_enabled() { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + } else { + light_client_update::NEXT_SYNC_COMMITTEE_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + self.generate_proof(field_index, &leaves) + } + + pub fn compute_finalized_root_proof(&self) -> Result, Error> { + // Finalized root is the right child of `finalized_checkpoint`, divide by two to get + // the generalized index of `state.finalized_checkpoint`. + let field_index = if self.fork_name_unchecked().electra_enabled() { + // Index should be 169/2 - 64 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + } else { + // Index should be 105/2 - 32 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState` + light_client_update::FINALIZED_ROOT_INDEX + }; + let leaves = self.get_beacon_state_leaves(); + let mut proof = self.generate_proof(field_index, &leaves)?; + proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); + Ok(proof) + } + + fn generate_proof( + &self, + field_index: usize, + leaves: &[Hash256], + ) -> Result, Error> { + let depth = self.num_fields_pow2().ilog2() as usize; + let tree = merkle_proof::MerkleTree::create(leaves, depth); + let (_, proof) = tree.generate_proof(field_index, depth)?; + Ok(proof) + } + + fn get_beacon_state_leaves(&self) -> Vec { let mut leaves = vec![]; #[allow(clippy::arithmetic_side_effects)] match self { @@ -2492,18 +2552,7 @@ impl BeaconState { } }; - // 3. Make deposit tree. - // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). 
- let depth = light_client_update::CURRENT_SYNC_COMMITTEE_PROOF_LEN; - let tree = merkle_proof::MerkleTree::create(&leaves, depth); - let (_, mut proof) = tree.generate_proof(field_index, depth)?; - - // 4. If we're proving the finalized root, patch in the finalized epoch to complete the proof. - if generalized_index == light_client_update::FINALIZED_ROOT_INDEX { - proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); - } - - Ok(proof) + leaves } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index d8b75260b6..1c4effb4ae 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -114,7 +114,6 @@ pub struct ChainSpec { /* * Fork choice */ - pub safe_slots_to_update_justified: u64, pub proposer_score_boost: Option, pub reorg_head_weight_threshold: Option, pub reorg_parent_weight_threshold: Option, @@ -157,7 +156,6 @@ pub struct ChainSpec { pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, pub terminal_block_hash_activation_epoch: Epoch, - pub safe_slots_to_import_optimistically: u64, /* * Capella hard fork params @@ -705,7 +703,6 @@ impl ChainSpec { /* * Fork choice */ - safe_slots_to_update_justified: 8, proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), @@ -756,7 +753,6 @@ impl ChainSpec { .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), - safe_slots_to_import_optimistically: 128u64, /* * Capella hard fork params @@ -886,7 +882,6 @@ impl ChainSpec { inactivity_penalty_quotient: u64::checked_pow(2, 25).expect("pow does not overflow"), min_slashing_penalty_quotient: 64, proportional_slashing_multiplier: 2, - safe_slots_to_update_justified: 2, // Altair epochs_per_sync_committee_period: Epoch::new(8), altair_fork_version: [0x01, 0x00, 0x00, 0x01], @@ -1026,7 +1021,6 @@ impl 
ChainSpec { /* * Fork choice */ - safe_slots_to_update_justified: 8, proposer_score_boost: Some(40), reorg_head_weight_threshold: Some(20), reorg_parent_weight_threshold: Some(160), @@ -1077,7 +1071,6 @@ impl ChainSpec { .expect("terminal_total_difficulty is a valid integer"), terminal_block_hash: ExecutionBlockHash::zero(), terminal_block_hash_activation_epoch: Epoch::new(u64::MAX), - safe_slots_to_import_optimistically: 128u64, /* * Capella hard fork params @@ -1212,9 +1205,6 @@ pub struct Config { pub terminal_block_hash: ExecutionBlockHash, #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, - #[serde(default = "default_safe_slots_to_import_optimistically")] - #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_import_optimistically: u64, #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, @@ -1425,10 +1415,6 @@ fn default_terminal_block_hash_activation_epoch() -> Epoch { Epoch::new(u64::MAX) } -fn default_safe_slots_to_import_optimistically() -> u64 { - 128u64 -} - fn default_subnets_per_node() -> u8 { 2u8 } @@ -1649,7 +1635,6 @@ impl Config { terminal_total_difficulty: spec.terminal_total_difficulty, terminal_block_hash: spec.terminal_block_hash, terminal_block_hash_activation_epoch: spec.terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically: spec.safe_slots_to_import_optimistically, min_genesis_active_validator_count: spec.min_genesis_active_validator_count, min_genesis_time: spec.min_genesis_time, @@ -1751,7 +1736,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, min_genesis_active_validator_count, min_genesis_time, genesis_fork_version, @@ -1851,7 +1835,6 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - safe_slots_to_import_optimistically, gossip_max_size, 
min_epochs_for_block_requests, max_chunk_size, @@ -2103,7 +2086,6 @@ mod yaml_tests { #TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638911 #TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000001 #TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551614 - #SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY: 2 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384 MIN_GENESIS_TIME: 1606824000 GENESIS_FORK_VERSION: 0x00000000 @@ -2152,7 +2134,6 @@ mod yaml_tests { check_default!(terminal_total_difficulty); check_default!(terminal_block_hash); check_default!(terminal_block_hash_activation_epoch); - check_default!(safe_slots_to_import_optimistically); check_default!(bellatrix_fork_version); check_default!(gossip_max_size); check_default!(min_epochs_for_block_requests); diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index b1e9049b0d..c80d678b2a 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -41,6 +41,7 @@ pub struct ConfigAndPreset { } impl ConfigAndPreset { + // DEPRECATED: the `fork_name` argument is never used, we should remove it. 
pub fn from_chain_spec(spec: &ChainSpec, fork_name: Option) -> Self { let config = Config::from_chain_spec::(spec); let base_preset = BasePreset::from_chain_spec::(spec); diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index c28356930b..6eaa559404 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -200,7 +200,7 @@ pub use crate::light_client_optimistic_update::{ }; pub use crate::light_client_update::{ Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, + LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, MerkleProof, }; pub use crate::participation_flags::ParticipationFlags; pub use crate::payload::{ diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 25f029bcc0..21a7e5416f 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -57,7 +57,16 @@ pub struct LightClientBootstrap { /// The `SyncCommittee` used in the requested period. 
pub current_sync_committee: Arc>, /// Merkle proof for sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "current_sync_committee_branch_altair") + )] pub current_sync_committee_branch: FixedVector, + #[superstruct( + only(Electra), + partial_getter(rename = "current_sync_committee_branch_electra") + )] + pub current_sync_committee_branch: FixedVector, } impl LightClientBootstrap { @@ -115,7 +124,7 @@ impl LightClientBootstrap { pub fn new( block: &SignedBlindedBeaconBlock, current_sync_committee: Arc>, - current_sync_committee_branch: FixedVector, + current_sync_committee_branch: Vec, chain_spec: &ChainSpec, ) -> Result { let light_client_bootstrap = match block @@ -126,22 +135,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -155,9 +164,7 @@ impl LightClientBootstrap { ) -> Result { let mut header = beacon_state.latest_block_header().clone(); header.state_root = 
beacon_state.update_tree_hash_cache()?; - let current_sync_committee_branch = - FixedVector::new(beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?)?; - + let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = beacon_state.current_sync_committee()?.clone(); let light_client_bootstrap = match block @@ -168,22 +175,22 @@ impl LightClientBootstrap { ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch, + current_sync_committee_branch: current_sync_committee_branch.into(), }), }; @@ -210,8 +217,28 @@ impl ForkVersionDeserialize for LightClientBootstrap { #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientBootstrapAltair, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapAltair); + } - ssz_tests!(LightClientBootstrapDeneb); + #[cfg(test)] + mod capella { + use crate::{LightClientBootstrapCapella, MainnetEthSpec}; + 
ssz_tests!(LightClientBootstrapCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientBootstrapDeneb, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientBootstrapElectra, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapElectra); + } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 91ee58b4be..ba2f2083cd 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -63,8 +63,13 @@ pub struct LightClientFinalityUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. - #[test_random(default)] + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FixedVector, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FixedVector, /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -75,7 +80,7 @@ impl LightClientFinalityUpdate { pub fn new( attested_block: &SignedBlindedBeaconBlock, finalized_block: &SignedBlindedBeaconBlock, - finality_branch: FixedVector, + finality_branch: Vec, sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, @@ -92,7 +97,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }) @@ -104,7 +109,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -115,7 
+120,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -126,7 +131,7 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate, signature_slot, }), @@ -226,8 +231,28 @@ impl ForkVersionDeserialize for LightClientFinalityUpdate { #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientFinalityUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateAltair); + } - ssz_tests!(LightClientFinalityUpdateDeneb); + #[cfg(test)] + mod capella { + use crate::{LightClientFinalityUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientFinalityUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientFinalityUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateElectra); + } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index fecdc39533..52800f18ac 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -307,3 +307,31 @@ impl ForkVersionDeserialize for LightClientHeader { } } } + +#[cfg(test)] +mod tests { + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientHeaderAltair, MainnetEthSpec}; + ssz_tests!(LightClientHeaderAltair); + } + + #[cfg(test)] + mod capella { + use crate::{LightClientHeaderCapella, MainnetEthSpec}; + ssz_tests!(LightClientHeaderCapella); + } + + 
#[cfg(test)] + mod deneb { + use crate::{LightClientHeaderDeneb, MainnetEthSpec}; + ssz_tests!(LightClientHeaderDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientHeaderElectra, MainnetEthSpec}; + ssz_tests!(LightClientHeaderElectra); + } +} diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 2f8cc034eb..209388af87 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -214,8 +214,28 @@ impl ForkVersionDeserialize for LightClientOptimisticUpdate { #[cfg(test)] mod tests { - use super::*; - use crate::MainnetEthSpec; + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use crate::{LightClientOptimisticUpdateAltair, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateAltair); + } - ssz_tests!(LightClientOptimisticUpdateDeneb); + #[cfg(test)] + mod capella { + use crate::{LightClientOptimisticUpdateCapella, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use crate::{LightClientOptimisticUpdateDeneb, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use crate::{LightClientOptimisticUpdateElectra, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateElectra); + } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 3b48a68df3..a7ddf8eb31 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,5 +1,6 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::light_client_header::LightClientHeaderElectra; +use crate::LightClientHeader; use crate::{ beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, 
LightClientHeaderDeneb, @@ -10,10 +11,10 @@ use safe_arith::ArithError; use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; -use ssz::Decode; +use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; -use ssz_types::typenum::{U4, U5, U6}; +use ssz_types::typenum::{U4, U5, U6, U7}; use std::sync::Arc; use superstruct::superstruct; use test_random_derive::TestRandom; @@ -24,20 +25,39 @@ pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; pub const EXECUTION_PAYLOAD_INDEX: usize = 25; +pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; +pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; +pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; + pub type FinalizedRootProofLen = U6; pub type CurrentSyncCommitteeProofLen = U5; pub type ExecutionPayloadProofLen = U4; - pub type NextSyncCommitteeProofLen = U5; +pub type FinalizedRootProofLenElectra = U7; +pub type CurrentSyncCommitteeProofLenElectra = U6; +pub type NextSyncCommitteeProofLenElectra = U6; + pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; +pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; + +pub type MerkleProof = Vec; +// Max light client updates by range request limits +// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration +pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; + type FinalityBranch = FixedVector; +type FinalityBranchElectra = FixedVector; type NextSyncCommitteeBranch = FixedVector; +type NextSyncCommitteeBranchElectra = FixedVector; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), 
@@ -119,8 +139,17 @@ pub struct LightClientUpdate { pub attested_header: LightClientHeaderElectra, /// The `SyncCommittee` used in the next period. pub next_sync_committee: Arc>, - /// Merkle proof for next sync committee + // Merkle proof for next sync committee + #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "next_sync_committee_branch_altair") + )] pub next_sync_committee_branch: NextSyncCommitteeBranch, + #[superstruct( + only(Electra), + partial_getter(rename = "next_sync_committee_branch_electra") + )] + pub next_sync_committee_branch: NextSyncCommitteeBranchElectra, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). #[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -131,7 +160,13 @@ pub struct LightClientUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. 
+ #[superstruct( + only(Altair, Capella, Deneb), + partial_getter(rename = "finality_branch_altair") + )] pub finality_branch: FinalityBranch, + #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + pub finality_branch: FinalityBranchElectra, /// current sync aggreggate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -160,8 +195,8 @@ impl LightClientUpdate { sync_aggregate: &SyncAggregate, block_slot: Slot, next_sync_committee: Arc>, - next_sync_committee_branch: FixedVector, - finality_branch: FixedVector, + next_sync_committee_branch: Vec, + finality_branch: Vec, attested_block: &SignedBlindedBeaconBlock, finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, @@ -184,9 +219,9 @@ impl LightClientUpdate { Self::Altair(LightClientUpdateAltair { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -204,9 +239,9 @@ impl LightClientUpdate { Self::Capella(LightClientUpdateCapella { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -224,9 +259,9 @@ impl LightClientUpdate { Self::Deneb(LightClientUpdateDeneb { attested_header, next_sync_committee, - next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -244,9 +279,9 @@ impl LightClientUpdate { Self::Electra(LightClientUpdateElectra { attested_header, next_sync_committee, - 
next_sync_committee_branch, + next_sync_committee_branch: next_sync_committee_branch.into(), finalized_header, - finality_branch, + finality_branch: finality_branch.into(), sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -386,23 +421,54 @@ impl LightClientUpdate { return Ok(new.signature_slot() < self.signature_slot()); } - fn is_next_sync_committee_branch_empty(&self) -> bool { - for index in self.next_sync_committee_branch().iter() { - if *index != Hash256::default() { - return false; - } - } - true + fn is_next_sync_committee_branch_empty<'a>(&'a self) -> bool { + map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { + cons(update); + is_empty_branch(update.next_sync_committee_branch.as_ref()) + }) } - pub fn is_finality_branch_empty(&self) -> bool { - for index in self.finality_branch().iter() { - if *index != Hash256::default() { - return false; - } - } - true + pub fn is_finality_branch_empty<'a>(&'a self) -> bool { + map_light_client_update_ref!(&'a _, self.to_ref(), |update, cons| { + cons(update); + is_empty_branch(update.finality_branch.as_ref()) + }) } + + // A `LightClientUpdate` has two `LightClientHeader`s + // Spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate + #[allow(clippy::arithmetic_side_effects)] + pub fn ssz_max_len_for_fork(fork_name: ForkName) -> usize { + let fixed_len = match fork_name { + ForkName::Base | ForkName::Bellatrix => 0, + ForkName::Altair => as Encode>::ssz_fixed_len(), + ForkName::Capella => as Encode>::ssz_fixed_len(), + ForkName::Deneb => as Encode>::ssz_fixed_len(), + ForkName::Electra => as Encode>::ssz_fixed_len(), + }; + fixed_len + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) + } + + pub fn map_with_fork_name(&self, func: F) -> R + where + F: Fn(ForkName) -> R, + { + match self { + Self::Altair(_) => func(ForkName::Altair), + Self::Capella(_) => func(ForkName::Capella), + Self::Deneb(_) => 
func(ForkName::Deneb), + Self::Electra(_) => func(ForkName::Electra), + } + } +} + +fn is_empty_branch(branch: &[Hash256]) -> bool { + for index in branch.iter() { + if *index != Hash256::default() { + return false; + } + } + true } fn compute_sync_committee_period_at_slot( @@ -416,16 +482,53 @@ fn compute_sync_committee_period_at_slot( #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; use ssz_types::typenum::Unsigned; - ssz_tests!(LightClientUpdateDeneb); + // `ssz_tests!` can only be defined once per namespace + #[cfg(test)] + mod altair { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateAltair); + } + + #[cfg(test)] + mod capella { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateCapella); + } + + #[cfg(test)] + mod deneb { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateDeneb); + } + + #[cfg(test)] + mod electra { + use super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateElectra); + } #[test] fn finalized_root_params() { assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32 + 1) > FINALIZED_ROOT_INDEX); assert_eq!(FinalizedRootProofLen::to_usize(), FINALIZED_ROOT_PROOF_LEN); + + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32) <= FINALIZED_ROOT_INDEX_ELECTRA + ); + assert!( + 2usize.pow(FINALIZED_ROOT_PROOF_LEN_ELECTRA as u32 + 1) > FINALIZED_ROOT_INDEX_ELECTRA + ); + assert_eq!( + FinalizedRootProofLenElectra::to_usize(), + FINALIZED_ROOT_PROOF_LEN_ELECTRA + ); } #[test] @@ -440,6 +543,19 @@ mod tests { CurrentSyncCommitteeProofLen::to_usize(), CURRENT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + 
CurrentSyncCommitteeProofLenElectra::to_usize(), + CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } #[test] @@ -450,5 +566,18 @@ mod tests { NextSyncCommitteeProofLen::to_usize(), NEXT_SYNC_COMMITTEE_PROOF_LEN ); + + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32) + <= NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert!( + 2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA as u32 + 1) + > NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + ); + assert_eq!( + NextSyncCommitteeProofLenElectra::to_usize(), + NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA + ); } } diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 694ab9af0e..b469b7b777 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -27,8 +27,6 @@ pub struct BasePreset { #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_upward_multiplier: u64, #[serde(with = "serde_utils::quoted_u64")] - pub safe_slots_to_update_justified: u64, - #[serde(with = "serde_utils::quoted_u64")] pub min_deposit_amount: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance: u64, @@ -90,7 +88,6 @@ impl BasePreset { hysteresis_quotient: spec.hysteresis_quotient, hysteresis_downward_multiplier: spec.hysteresis_downward_multiplier, hysteresis_upward_multiplier: spec.hysteresis_upward_multiplier, - safe_slots_to_update_justified: spec.safe_slots_to_update_justified, min_deposit_amount: spec.min_deposit_amount, max_effective_balance: spec.max_effective_balance, effective_balance_increment: spec.effective_balance_increment, diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 941c420b55..f2a16bacd5 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, DepositData, 
Epoch, + EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -35,6 +35,34 @@ pub struct Validator { } impl Validator { + #[allow(clippy::arithmetic_side_effects)] + pub fn from_deposit( + deposit_data: &DepositData, + amount: u64, + fork_name: ForkName, + spec: &ChainSpec, + ) -> Self { + let mut validator = Validator { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: 0, + slashed: false, + }; + + let max_effective_balance = validator.get_max_effective_balance(spec, fork_name); + // safe math is unnecessary here since the spec.effective_balance_increment is never <= 0 + validator.effective_balance = std::cmp::min( + amount - (amount % spec.effective_balance_increment), + max_effective_balance, + ); + + validator + } + /// Returns `true` if the validator is considered active at some epoch. 
pub fn is_active_at(&self, epoch: Epoch) -> bool { self.activation_epoch <= epoch && epoch < self.exit_epoch diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 7c37aa6d67..1125697c7c 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -49,7 +49,7 @@ clap_utils = { workspace = true } eth2_network_config = { workspace = true } lighthouse_version = { workspace = true } account_utils = { workspace = true } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 9ad40a6acd..89d759d662 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -333,7 +333,7 @@ impl EnvironmentBuilder { eth2_network_config: Eth2NetworkConfig, ) -> Result { // Create a new chain spec from the default configuration. - self.eth2_config.spec = Arc::new(eth2_network_config.chain_spec::()?); + self.eth2_config.spec = eth2_network_config.chain_spec::()?.into(); self.eth2_network_config = Some(eth2_network_config); Ok(self) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index aad8860fcc..e33e4cb9b8 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -115,16 +115,6 @@ fn main() { .global(true) .display_order(0), ) - .arg( - Arg::new("env_log") - .short('l') - .help( - "DEPRECATED Enables environment logging giving access to sub-protocol logs such as discv5 and libp2p", - ) - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("logfile") .long("logfile") @@ -333,57 +323,43 @@ fn main() { Arg::new("terminal-total-difficulty-override") .long("terminal-total-difficulty-override") .value_name("INTEGER") - .help("Used to coordinate manual overrides to the TERMINAL_TOTAL_DIFFICULTY parameter. \ - Accepts a 256-bit decimal integer (not a hex value). 
\ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal difficulty. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-override") .long("terminal-block-hash-override") .value_name("TERMINAL_BLOCK_HASH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH parameter. \ - This flag should only be used if the user has a clear understanding that \ - the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-epoch-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("terminal-block-hash-epoch-override") .long("terminal-block-hash-epoch-override") .value_name("EPOCH") - .help("Used to coordinate manual overrides to the TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH \ - parameter. This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override the terminal PoW block. \ - Incorrect use of this flag will cause your node to experience a consensus \ - failure. Be extremely careful with this flag.") + .help("DEPRECATED") .requires("terminal-block-hash-override") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("safe-slots-to-import-optimistically") .long("safe-slots-to-import-optimistically") .value_name("INTEGER") - .help("Used to coordinate manual overrides of the SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY \ - parameter. 
This flag should only be used if the user has a clear understanding \ - that the broad Ethereum community has elected to override this parameter in the event \ - of an attack at the PoS transition block. Incorrect use of this flag can cause your \ - node to possibly accept an invalid chain or sync more slowly. Be extremely careful with \ - this flag.") + .help("DEPRECATED") .action(ArgAction::Set) .global(true) .display_order(0) + .hide(true) ) .arg( Arg::new("genesis-state-url") @@ -641,6 +617,20 @@ fn run( ); } + // Warn for DEPRECATED global flags. This code should be removed when we finish deleting these + // flags. + let deprecated_flags = [ + "terminal-total-difficulty-override", + "terminal-block-hash-override", + "terminal-block-hash-epoch-override", + "safe-slots-to-import-optimistically", + ]; + for flag in deprecated_flags { + if matches.get_one::(flag).is_some() { + slog::warn!(log, "The {} flag is deprecated and does nothing", flag); + } + } + // Note: the current code technically allows for starting a beacon node _and_ a validator // client at the same time. 
// diff --git a/lighthouse/src/metrics.rs b/lighthouse/src/metrics.rs index 0002b43e7b..30e0120582 100644 --- a/lighthouse/src/metrics.rs +++ b/lighthouse/src/metrics.rs @@ -1,5 +1,5 @@ -pub use lighthouse_metrics::*; use lighthouse_version::VERSION; +pub use metrics::*; use slog::{error, Logger}; use std::sync::LazyLock; use std::time::{SystemTime, UNIX_EPOCH}; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index f3832a1a1e..ac7ddcdbd9 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -21,7 +21,7 @@ use std::string::ToString; use std::time::Duration; use tempfile::TempDir; use types::non_zero_usize::new_non_zero_usize; -use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, Hash256, MainnetEthSpec}; +use types::{Address, Checkpoint, Epoch, Hash256, MainnetEthSpec}; use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; @@ -159,13 +159,6 @@ fn max_skip_slots_flag() { .with_config(|config| assert_eq!(config.chain.import_max_skip_slots, Some(10))); } -#[test] -fn disable_lock_timeouts_flag() { - CommandLineTest::new() - .flag("disable-lock-timeouts", None) - .run_with_zero_port(); -} - #[test] fn shuffling_cache_default() { CommandLineTest::new() @@ -749,61 +742,30 @@ fn jwt_optional_flags() { fn jwt_optional_alias_flags() { run_jwt_optional_flags_test("jwt-secrets", "jwt-id", "jwt-version"); } +// DEPRECATED. This flag is deprecated but should not cause a crash. #[test] fn terminal_total_difficulty_override_flag() { - use beacon_node::beacon_chain::types::Uint256; CommandLineTest::new() .flag("terminal-total-difficulty-override", Some("1337424242")) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!(spec.terminal_total_difficulty, Uint256::from(1337424242)) - }); + .run_with_zero_port(); } +// DEPRECATED. This flag is deprecated but should not cause a crash. 
#[test] fn terminal_block_hash_and_activation_epoch_override_flags() { CommandLineTest::new() .flag("terminal-block-hash-epoch-override", Some("1337")) - .flag( - "terminal-block-hash-override", - Some("0x4242424242424242424242424242424242424242424242424242424242424242"), - ) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!( - spec.terminal_block_hash, - ExecutionBlockHash::from_str( - "0x4242424242424242424242424242424242424242424242424242424242424242" - ) - .unwrap() - ); - assert_eq!(spec.terminal_block_hash_activation_epoch, 1337); - }); -} -#[test] -#[should_panic] -fn terminal_block_hash_missing_activation_epoch() { - CommandLineTest::new() .flag( "terminal-block-hash-override", Some("0x4242424242424242424242424242424242424242424242424242424242424242"), ) .run_with_zero_port(); } -#[test] -#[should_panic] -fn epoch_override_missing_terminal_block_hash() { - CommandLineTest::new() - .flag("terminal-block-hash-epoch-override", Some("1337")) - .run_with_zero_port(); -} +// DEPRECATED. This flag is deprecated but should not cause a crash. #[test] fn safe_slots_to_import_optimistically_flag() { CommandLineTest::new() .flag("safe-slots-to-import-optimistically", Some("421337")) - .run_with_zero_port() - .with_spec::(|spec| { - assert_eq!(spec.safe_slots_to_import_optimistically, 421337) - }); + .run_with_zero_port(); } // Tests for Network flags. @@ -1612,19 +1574,6 @@ fn http_port_flag() { .run() .with_config(|config| assert_eq!(config.http_api.listen_port, port1)); } -#[test] -fn empty_self_limiter_flag() { - // Test that empty rate limiter is accepted using the default rate limiting configurations. 
- CommandLineTest::new() - .flag("self-limiter", None) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.network.outbound_rate_limiter_config, - Some(lighthouse_network::rpc::config::OutboundRateLimiterConfig::default()) - ) - }); -} #[test] fn empty_inbound_rate_limiter_flag() { @@ -1667,14 +1616,6 @@ fn http_allow_origin_all_flag() { .with_config(|config| assert_eq!(config.http_api.allow_origin, Some("*".to_string()))); } -#[test] -fn http_allow_sync_stalled_flag() { - CommandLineTest::new() - .flag("http", None) - .flag("http-allow-sync-stalled", None) - .run_with_zero_port(); -} - #[test] fn http_enable_beacon_processor() { CommandLineTest::new() @@ -1713,22 +1654,6 @@ fn http_tls_flags() { }); } -#[test] -fn http_spec_fork_default() { - CommandLineTest::new() - .flag("http", None) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.spec_fork_name, None)); -} - -#[test] -fn http_spec_fork_override() { - CommandLineTest::new() - .flag("http", None) - .flag("http-spec-fork", Some("altair")) - .run_with_zero_port(); -} - // Tests for Metrics flags. #[test] fn metrics_flag() { @@ -2631,14 +2556,6 @@ fn invalid_gossip_verified_blocks_path() { }); } -#[test] -fn progressive_balances_checked() { - // Flag is deprecated but supplying it should not crash until we remove it completely. 
- CommandLineTest::new() - .flag("progressive-balances", Some("checked")) - .run_with_zero_port(); -} - #[test] fn beacon_processor() { CommandLineTest::new() diff --git a/lighthouse/tests/exec.rs b/lighthouse/tests/exec.rs index 9d6453908c..5379912c13 100644 --- a/lighthouse/tests/exec.rs +++ b/lighthouse/tests/exec.rs @@ -140,11 +140,6 @@ impl CompletedTest { func(&self.config); } - pub fn with_spec(self, func: F) { - let spec = ChainSpec::from_config::(&self.chain_config).unwrap(); - func(spec); - } - pub fn with_config_and_dir(self, func: F) { func(&self.config, &self.dir); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index baf50aa7c0..147a371f0e 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -426,13 +426,6 @@ fn no_doppelganger_protection_flag() { .with_config(|config| assert!(!config.enable_doppelganger_protection)); } -#[test] -fn produce_block_v3_flag() { - // The flag is DEPRECATED but providing it should not trigger an error. - // We can delete this test when deleting the flag entirely. 
- CommandLineTest::new().flag("produce-block-v3", None).run(); -} - #[test] fn no_gas_limit_flag() { CommandLineTest::new() @@ -513,23 +506,6 @@ fn monitoring_endpoint() { assert_eq!(api_conf.update_period_secs, Some(30)); }); } -#[test] -fn disable_run_on_all_flag() { - CommandLineTest::new() - .flag("disable-run-on-all", None) - .run() - .with_config(|config| { - assert_eq!(config.broadcast_topics, vec![]); - }); - // --broadcast flag takes precedence - CommandLineTest::new() - .flag("disable-run-on-all", None) - .flag("broadcast", Some("attestations")) - .run() - .with_config(|config| { - assert_eq!(config.broadcast_topics, vec![ApiTopic::Attestations]); - }); -} #[test] fn no_broadcast_flag() { @@ -623,16 +599,6 @@ fn disable_latency_measurement_service() { assert!(!config.enable_latency_measurement_service); }); } -#[test] -fn latency_measurement_service() { - // This flag is DEPRECATED so has no effect, but should still be accepted. - CommandLineTest::new() - .flag("latency-measurement-service", Some("false")) - .run() - .with_config(|config| { - assert!(config.enable_latency_measurement_service); - }); -} #[test] fn validator_registration_batch_size() { diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index bca6a18ab5..999f3c3141 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -9,7 +9,9 @@ use tempfile::{tempdir, TempDir}; use types::*; use validator_manager::{ create_validators::CreateConfig, + delete_validators::DeleteConfig, import_validators::ImportConfig, + list_validators::ListConfig, move_validators::{MoveConfig, PasswordSource, Validators}, }; @@ -105,6 +107,18 @@ impl CommandLineTest { } } +impl CommandLineTest { + fn validators_list() -> Self { + Self::default().flag("list", None) + } +} + +impl CommandLineTest { + fn validators_delete() -> Self { + Self::default().flag("delete", None) + } +} + #[test] pub fn validator_create_without_output_path() { 
CommandLineTest::validators_create().assert_failed(); @@ -199,10 +213,18 @@ pub fn validator_import_defaults() { .flag("--vc-token", Some("./token.json")) .assert_success(|config| { let expected = ImportConfig { - validators_file_path: PathBuf::from("./vals.json"), + validators_file_path: Some(PathBuf::from("./vals.json")), + keystore_file_path: None, vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), vc_token_path: PathBuf::from("./token.json"), ignore_duplicates: false, + password: None, + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, + enabled: None, + prefer_builder_proposals: None, }; assert_eq!(expected, config); }); @@ -216,10 +238,18 @@ pub fn validator_import_misc_flags() { .flag("--ignore-duplicates", None) .assert_success(|config| { let expected = ImportConfig { - validators_file_path: PathBuf::from("./vals.json"), + validators_file_path: Some(PathBuf::from("./vals.json")), + keystore_file_path: None, vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), vc_token_path: PathBuf::from("./token.json"), ignore_duplicates: true, + password: None, + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, + enabled: None, + prefer_builder_proposals: None, }; assert_eq!(expected, config); }); @@ -233,7 +263,17 @@ pub fn validator_import_missing_token() { } #[test] -pub fn validator_import_missing_validators_file() { +pub fn validator_import_using_both_file_flags() { + CommandLineTest::validators_import() + .flag("--vc-token", Some("./token.json")) + .flag("--validators-file", Some("./vals.json")) + .flag("--keystore-file", Some("./keystore.json")) + .flag("--password", Some("abcd")) + .assert_failed(); +} + +#[test] +pub fn validator_import_missing_both_file_flags() { CommandLineTest::validators_import() .flag("--vc-token", Some("./token.json")) .assert_failed(); @@ -394,3 +434,37 @@ pub fn validator_move_count() { assert_eq!(expected, config); }); } 
+ +#[test] +pub fn validator_list_defaults() { + CommandLineTest::validators_list() + .flag("--vc-token", Some("./token.json")) + .assert_success(|config| { + let expected = ListConfig { + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + }; + assert_eq!(expected, config); + }); +} + +#[test] +pub fn validator_delete_defaults() { + CommandLineTest::validators_delete() + .flag( + "--validators", + Some(&format!("{},{}", EXAMPLE_PUBKEY_0, EXAMPLE_PUBKEY_1)), + ) + .flag("--vc-token", Some("./token.json")) + .assert_success(|config| { + let expected = DeleteConfig { + vc_url: SensitiveUrl::parse("http://localhost:5062").unwrap(), + vc_token_path: PathBuf::from("./token.json"), + validators_to_delete: vec![ + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_0).unwrap(), + PublicKeyBytes::from_str(EXAMPLE_PUBKEY_1).unwrap(), + ], + }; + assert_eq!(expected, config); + }); +} diff --git a/scripts/cli.sh b/scripts/cli.sh index e43c05a834..ef4ed158ad 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -40,7 +40,7 @@ vm_import=./help_vm_import.md vm_move=./help_vm_move.md # create .md files -write_to_file "$general_cli" "$general" "Lighthouse General Commands" +write_to_file "$general_cli" "$general" "Lighthouse CLI Reference" write_to_file "$bn_cli" "$bn" "Beacon Node" write_to_file "$vc_cli" "$vc" "Validator Client" write_to_file "$vm_cli" "$vm" "Validator Manager" diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 0275cb217f..ca701eb7e9 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -9,7 +9,7 @@ This setup can be useful for testing and development. 1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version` which should display the version. -1. Install [yq](https://github.com/mikefarah/yq). 
If you are on Ubuntu, you can install `yq` by running `sudo apt install yq -y`. +1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. ## Starting the testnet @@ -82,4 +82,4 @@ The script comes with some CLI options, which can be viewed with `./start_local_ ```bash ./start_local_testnet.sh -b false -``` \ No newline at end of file +``` diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index f90132764e..1f15688693 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -7,7 +7,7 @@ set -Eeuo pipefail SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" ENCLAVE_NAME=local-testnet NETWORK_PARAMS_FILE=$SCRIPT_DIR/network_params.yaml -ETHEREUM_PKG_VERSION=4.2.0 +ETHEREUM_PKG_VERSION=main BUILD_IMAGE=true BUILDER_PROPOSALS=false diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index d74b0ac062..56a023df0b 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -18,7 +18,7 @@ derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } filesystem = { workspace = true } lru = { workspace = true } parking_lot = { workspace = true } @@ -37,7 +37,7 @@ mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } -redb = { version = "2.1", optional = true } +redb = { version = "2.1.4", optional = true } [dev-dependencies] maplit = { workspace = true } diff --git a/slasher/src/database/redb_impl.rs 
b/slasher/src/database/redb_impl.rs index 6c5b62a44f..12bef71148 100644 --- a/slasher/src/database/redb_impl.rs +++ b/slasher/src/database/redb_impl.rs @@ -164,13 +164,9 @@ impl<'env> Cursor<'env> { let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(&self.db.table_name); let table = self.txn.open_table(table_definition)?; - let first = table - .iter()? - .next() - .map(|x| x.map(|(key, _)| key.value().to_vec())); + let first = table.first()?.map(|(key, _)| key.value().to_vec()); if let Some(owned_key) = first { - let owned_key = owned_key?; self.current_key = Some(Cow::from(owned_key)); Ok(self.current_key.clone()) } else { @@ -182,13 +178,9 @@ impl<'env> Cursor<'env> { let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(&self.db.table_name); let table = self.txn.open_table(table_definition)?; - let last = table - .iter()? - .next_back() - .map(|x| x.map(|(key, _)| key.value().to_vec())); + let last = table.last()?.map(|(key, _)| key.value().to_vec()); if let Some(owned_key) = last { - let owned_key = owned_key?; self.current_key = Some(Cow::from(owned_key)); return Ok(self.current_key.clone()); } diff --git a/slasher/src/metrics.rs b/slasher/src/metrics.rs index 2e49bd4aeb..cfeec2d74e 100644 --- a/slasher/src/metrics.rs +++ b/slasher/src/metrics.rs @@ -1,4 +1,4 @@ -pub use lighthouse_metrics::*; +pub use metrics::*; use std::sync::LazyLock; pub static SLASHER_DATABASE_SIZE: LazyLock> = LazyLock::new(|| { diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 9495047e7f..dacca204c1 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -25,6 +25,8 @@ excluded_paths = [ # Intentionally omitted, as per https://github.com/sigp/lighthouse/issues/1835 "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", + # We no longer implement merge logic. 
+ "tests/.*/bellatrix/fork_choice/on_merge_block", # light_client "tests/.*/.*/light_client/single_merkle_proof", "tests/.*/.*/light_client/sync", @@ -46,11 +48,6 @@ excluded_paths = [ "tests/.*/eip6110", "tests/.*/whisk", "tests/.*/eip7594", - # TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - "tests/.*/electra/ssz_static/LightClientUpdate", - "tests/.*/electra/ssz_static/LightClientFinalityUpdate", - "tests/.*/electra/ssz_static/LightClientBootstrap", - "tests/.*/electra/merkle_proof", ] diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index b68bbdc5d3..49c0719784 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -3,8 +3,8 @@ use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use tree_hash::Hash256; use types::{ - BeaconBlockBody, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, FixedVector, - FullPayload, Unsigned, + light_client_update, BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, + BeaconBlockBodyElectra, BeaconState, FixedVector, FullPayload, Unsigned, }; #[derive(Debug, Clone, Deserialize)] @@ -22,13 +22,13 @@ pub struct MerkleProof { #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] -pub struct MerkleProofValidity { +pub struct BeaconStateMerkleProofValidity { pub metadata: Option, pub state: BeaconState, pub merkle_proof: MerkleProof, } -impl LoadCase for MerkleProofValidity { +impl LoadCase for BeaconStateMerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); let state = ssz_decode_state(&path.join("object.ssz_snappy"), spec)?; @@ -49,11 +49,30 @@ impl LoadCase for MerkleProofValidity { } } -impl Case for MerkleProofValidity { +impl Case for BeaconStateMerkleProofValidity { fn result(&self, _case_index: usize, 
_fork_name: ForkName) -> Result<(), Error> { let mut state = self.state.clone(); state.update_tree_hash_cache().unwrap(); - let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else { + + let proof = match self.merkle_proof.leaf_index { + light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + | light_client_update::CURRENT_SYNC_COMMITTEE_INDEX => { + state.compute_current_sync_committee_proof() + } + light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => { + state.compute_next_sync_committee_proof() + } + light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + | light_client_update::FINALIZED_ROOT_INDEX => state.compute_finalized_root_proof(), + _ => { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof, invalid index".to_string(), + )); + } + }; + + let Ok(proof) = proof else { return Err(Error::FailedToParseTest( "Could not retrieve merkle proof".to_string(), )); @@ -198,3 +217,81 @@ impl Case for KzgInclusionMerkleProofValidity { } } } + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct BeaconBlockBodyMerkleProofValidity { + pub metadata: Option, + pub block_body: BeaconBlockBody>, + pub merkle_proof: MerkleProof, +} + +impl LoadCase for BeaconBlockBodyMerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let block_body: BeaconBlockBody> = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Bellatrix => { + return Err(Error::InternalError(format!( + "Beacon block body merkle proof validity test skipped for {:?}", + fork_name + ))) + } + ForkName::Capella => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? + .into() + } + ForkName::Deneb => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() + } + ForkName::Electra => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? 
+ .into() + } + }; + let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; + // Metadata does not exist in these tests but it is left like this just in case. + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) + } else { + None + }; + Ok(Self { + metadata, + block_body, + merkle_proof, + }) + } +} + +impl Case for BeaconBlockBodyMerkleProofValidity { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let binding = self.block_body.clone(); + let block_body = binding.to_ref(); + let Ok(proof) = block_body.block_body_merkle_proof(self.merkle_proof.leaf_index) else { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )); + }; + let proof_len = proof.len(); + let branch_len = self.merkle_proof.branch.len(); + if proof_len != branch_len { + return Err(Error::NotEqual(format!( + "Branches not equal in length computed: {}, expected {}", + proof_len, branch_len + ))); + } + + for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) { + let expected_leaf = self.merkle_proof.branch[i]; + if *proof_leaf != expected_leaf { + return Err(Error::NotEqual(format!( + "Leaves not equal in merke proof computed: {}, expected: {}", + hex::encode(proof_leaf), + hex::encode(expected_leaf) + ))); + } + } + + Ok(()) + } +} diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 97b449dab9..f4a09de32c 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -627,8 +627,8 @@ impl Handler for ForkChoiceHandler { } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - // Merge block tests are only enabled for Bellatrix. - if self.handler_name == "on_merge_block" && fork_name != ForkName::Bellatrix { + // We no longer run on_merge_block tests since removing merge support. 
+ if self.handler_name == "on_merge_block" { return false; } @@ -921,10 +921,10 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { #[derive(Derivative)] #[derivative(Default(bound = ""))] -pub struct MerkleProofValidityHandler(PhantomData); +pub struct BeaconStateMerkleProofValidityHandler(PhantomData); -impl Handler for MerkleProofValidityHandler { - type Case = cases::MerkleProofValidity; +impl Handler for BeaconStateMerkleProofValidityHandler { + type Case = cases::BeaconStateMerkleProofValidity; fn config_name() -> &'static str { E::name() @@ -935,15 +935,11 @@ impl Handler for MerkleProofValidityHandler { } fn handler_name(&self) -> String { - "single_merkle_proof".into() + "single_merkle_proof/BeaconState".into() } - fn is_enabled_for_fork(&self, _fork_name: ForkName) -> bool { - // Test is skipped due to some changes in the Capella light client - // spec. - // - // https://github.com/sigp/lighthouse/issues/4022 - false + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.altair_enabled() } } @@ -967,8 +963,32 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - // TODO(electra) re-enable for electra once merkle proof issues for electra are resolved - fork_name.deneb_enabled() && !fork_name.electra_enabled() + // Enabled in Deneb + fork_name.deneb_enabled() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct BeaconBlockBodyMerkleProofValidityHandler(PhantomData); + +impl Handler for BeaconBlockBodyMerkleProofValidityHandler { + type Case = cases::BeaconBlockBodyMerkleProofValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "single_merkle_proof/BeaconBlockBody".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + fork_name.capella_enabled() } } @@ -993,8 +1013,7 @@ impl Handler for LightClientUpdateHandler { fn is_enabled_for_fork(&self, fork_name: 
ForkName) -> bool { // Enabled in Altair - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - fork_name.altair_enabled() && fork_name != ForkName::Electra + fork_name.altair_enabled() } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 1184fbb514..292625a371 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -395,11 +395,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only() - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only() - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only() + .run(); } // LightClientHeader has no internal indicator of which fork it is for, so we test it separately. @@ -475,13 +474,12 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only( ) .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } // LightClientUpdate has no internal indicator of which fork it is for, so we test it separately. 
@@ -505,13 +503,12 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::deneb_only() .run(); - // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved - // SszStaticHandler::, MinimalEthSpec>::electra_only( - // ) - // .run(); - // SszStaticHandler::, MainnetEthSpec>::electra_only( - // ) - // .run(); + SszStaticHandler::, MinimalEthSpec>::electra_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::electra_only( + ) + .run(); } #[test] @@ -825,12 +822,6 @@ fn fork_choice_on_block() { ForkChoiceHandler::::new("on_block").run(); } -#[test] -fn fork_choice_on_merge_block() { - ForkChoiceHandler::::new("on_merge_block").run(); - ForkChoiceHandler::::new("on_merge_block").run(); -} - #[test] fn fork_choice_ex_ante() { ForkChoiceHandler::::new("ex_ante").run(); @@ -927,8 +918,13 @@ fn kzg_recover_cells_and_proofs() { } #[test] -fn merkle_proof_validity() { - MerkleProofValidityHandler::::default().run(); +fn beacon_state_merkle_proof_validity() { + BeaconStateMerkleProofValidityHandler::::default().run(); +} + +#[test] +fn beacon_block_body_merkle_proof_validity() { + BeaconBlockBodyMerkleProofValidityHandler::::default().run(); } #[test] diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 4c338e91b9..86825a9ee3 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -48,7 +48,7 @@ ethereum_serde_utils = { workspace = true } libsecp256k1 = { workspace = true } ring = { workspace = true } rand = { workspace = true, features = ["small_rng"] } -lighthouse_metrics = { workspace = true } +metrics = { workspace = true } monitoring_api = { workspace = true } sensitive_url = { workspace = true } task_executor = { workspace = true } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 665eaf0a0f..9903324cad 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -525,7 +525,7 @@ impl BlockService 
{ &[metrics::BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blocks(signed_block) + .post_beacon_blocks_v2_ssz(signed_block, None) .await .or_else(|e| handle_block_post_error(e, slot, log))? } @@ -535,7 +535,7 @@ impl BlockService { &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blinded_blocks(signed_block) + .post_beacon_blinded_blocks_v2_ssz(signed_block, None) .await .or_else(|e| handle_block_post_error(e, slot, log))? } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index b027ad0df6..209876f07b 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -39,20 +39,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - // TODO remove this flag in a future release - .arg( - Arg::new("disable-run-on-all") - .long("disable-run-on-all") - .value_name("DISABLE_RUN_ON_ALL") - .help("DEPRECATED. Use --broadcast. \ - By default, Lighthouse publishes attestation, sync committee subscriptions \ - and proposer preparation messages to all beacon nodes provided in the \ - `--beacon-nodes flag`. 
This option changes that behaviour such that these \ - api calls only go out to the first available and synced beacon node") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("broadcast") .long("broadcast") @@ -167,14 +153,6 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) - .arg( - Arg::new("produce-block-v3") - .long("produce-block-v3") - .help("This flag is deprecated and is no longer in use.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) .arg( Arg::new("distributed") .long("distributed") @@ -403,15 +381,6 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) - .arg( - Arg::new("latency-measurement-service") - .long("latency-measurement-service") - .help("DEPRECATED") - .action(ArgAction::Set) - .help_heading(FLAG_HEADER) - .display_order(0) - .hide(true) - ) .arg( Arg::new("validator-registration-batch-size") .long("validator-registration-batch-size") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index c2c445c48c..f42ed55146 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -244,14 +244,6 @@ impl Config { config.distributed = true; } - if cli_args.get_flag("disable-run-on-all") { - warn!( - log, - "The --disable-run-on-all flag is deprecated"; - "msg" => "please use --broadcast instead" - ); - config.broadcast_topics = vec![]; - } if let Some(broadcast_topics) = cli_args.get_one::("broadcast") { config.broadcast_topics = broadcast_topics .split(',') @@ -397,14 +389,6 @@ impl Config { config.prefer_builder_proposals = true; } - if cli_args.get_flag("produce-block-v3") { - warn!( - log, - "produce-block-v3 flag"; - "note" => "deprecated flag has no effect and should be removed" - ); - } - config.gas_limit = cli_args .get_one::("gas-limit") .map(|gas_limit| { @@ -429,17 +413,6 @@ impl Config { config.enable_latency_measurement_service = 
!cli_args.get_flag("disable-latency-measurement-service"); - if cli_args - .get_one::("latency-measurement-service") - .is_some() - { - warn!( - log, - "latency-measurement-service flag"; - "note" => "deprecated flag has no effect and should be removed" - ); - } - config.validator_registration_batch_size = parse_required(cli_args, "validator-registration-batch-size")?; if config.validator_registration_batch_size == 0 { diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index 074c578347..e5477ff8df 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -75,12 +75,6 @@ pub fn import( ))); } - info!( - log, - "Importing keystores via standard HTTP API"; - "count" => request.keystores.len(), - ); - // Import slashing protection data before keystores, so that new keystores don't start signing // without it. Do not return early on failure, propagate the failure to each key. let slashing_protection_status = @@ -156,6 +150,19 @@ pub fn import( statuses.push(status); } + let successful_import = statuses + .iter() + .filter(|status| matches!(status.status, ImportKeystoreStatus::Imported)) + .count(); + + if successful_import > 0 { + info!( + log, + "Imported keystores via standard HTTP API"; + "count" => successful_import, + ); + } + Ok(ImportKeystoresResponse { data: statuses }) } @@ -238,7 +245,23 @@ pub fn delete( task_executor: TaskExecutor, log: Logger, ) -> Result { - let export_response = export(request, validator_store, task_executor, log)?; + let export_response = export(request, validator_store, task_executor, log.clone())?; + + // Check the status is Deleted to confirm deletion is successful, then only display the log + let successful_deletion = export_response + .data + .iter() + .filter(|response| matches!(response.status.status, DeleteKeystoreStatus::Deleted)) + .count(); + + if successful_deletion > 0 { + info!( + log, + "Deleted keystore via standard HTTP 
API"; + "count" => successful_deletion, + ); + } + Ok(DeleteKeystoresResponse { data: export_response .data diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 8bc569c67a..57e1080fd9 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -38,7 +38,7 @@ pub const SUBSCRIPTIONS: &str = "subscriptions"; pub const LOCAL_KEYSTORE: &str = "local_keystore"; pub const WEB3SIGNER: &str = "web3signer"; -pub use lighthouse_metrics::*; +pub use metrics::*; pub static GENESIS_DISTANCE: LazyLock> = LazyLock::new(|| { try_create_int_gauge( @@ -316,9 +316,7 @@ pub fn gather_prometheus_metrics( warp_utils::metrics::scrape_health_metrics(); - encoder - .encode(&lighthouse_metrics::gather(), &mut buffer) - .unwrap(); + encoder.encode(&metrics::gather(), &mut buffer).unwrap(); String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index c94115e5ec..0ef9a6a13d 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -16,8 +16,8 @@ use account_utils::{ ZeroizeString, }; use eth2_keystore::Keystore; -use lighthouse_metrics::set_gauge; use lockfile::{Lockfile, LockfileError}; +use metrics::set_gauge; use parking_lot::{MappedMutexGuard, Mutex, MutexGuard}; use reqwest::{Certificate, Client, Error as ReqwestError, Identity}; use slog::{debug, error, info, warn, Logger}; diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 9a02ffdefb..05ec1e53aa 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -25,7 +25,7 @@ pub use beacon_node_health::BeaconNodeSyncDistanceTiers; pub use cli::cli_app; pub use config::Config; use initialized_validators::InitializedValidators; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use 
monitoring_api::{MonitoringHttpClient, ProcessType}; use sensitive_url::SensitiveUrl; pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index 00d7b14de7..cda13a5e63 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -1,6 +1,6 @@ use crate::http_metrics; use crate::{DutiesService, ProductionValidatorClient}; -use lighthouse_metrics::set_gauge; +use metrics::set_gauge; use slog::{debug, error, info, Logger}; use slot_clock::SlotClock; use tokio::time::{sleep, Duration}; diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index ebcde6a828..92267ad875 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -20,6 +20,7 @@ tree_hash = { workspace = true } eth2 = { workspace = true } hex = { workspace = true } tokio = { workspace = true } +derivative = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index 37a6040a9b..d4403b4613 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -45,15 +45,6 @@ pub fn cli_app() -> Command { Another, optional JSON file is created which contains a list of validator \ deposits in the same format as the \"ethereum/staking-deposit-cli\" tool.", ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(OUTPUT_PATH_FLAG) .long(OUTPUT_PATH_FLAG) diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs new file mode 100644 index 0000000000..6283279986 --- /dev/null +++ b/validator_manager/src/delete_validators.rs @@ -0,0 +1,293 @@ +use clap::{Arg, ArgAction, ArgMatches, Command}; +use eth2::{ + 
lighthouse_vc::types::{DeleteKeystoreStatus, DeleteKeystoresRequest}, + SensitiveUrl, +}; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use types::PublicKeyBytes; + +use crate::{common::vc_http_client, DumpConfig}; + +pub const CMD: &str = "delete"; +pub const VC_URL_FLAG: &str = "vc-url"; +pub const VC_TOKEN_FLAG: &str = "vc-token"; +pub const VALIDATOR_FLAG: &str = "validators"; + +#[derive(Debug)] +pub enum DeleteError { + InvalidPublicKey, + DeleteFailed(eth2::Error), +} + +pub fn cli_app() -> Command { + Command::new(CMD) + .about("Deletes one or more validators from a validator client using the HTTP API.") + .arg( + Arg::new(VC_URL_FLAG) + .long(VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help("A HTTP(S) address of a validator client using the keymanager-API.") + .default_value("http://localhost:5062") + .requires(VC_TOKEN_FLAG) + .action(ArgAction::Set) + .display_order(0), + ) + .arg( + Arg::new(VC_TOKEN_FLAG) + .long(VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the validator client.") + .action(ArgAction::Set) + .display_order(0), + ) + .arg( + Arg::new(VALIDATOR_FLAG) + .long(VALIDATOR_FLAG) + .value_name("STRING") + .help("Comma-separated list of validators (pubkey) that will be deleted.") + .action(ArgAction::Set) + .required(true) + .display_order(0), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct DeleteConfig { + pub vc_url: SensitiveUrl, + pub vc_token_path: PathBuf, + pub validators_to_delete: Vec, +} + +impl DeleteConfig { + fn from_cli(matches: &ArgMatches) -> Result { + let validators_to_delete_str = + clap_utils::parse_required::(matches, VALIDATOR_FLAG)?; + + let validators_to_delete = validators_to_delete_str + .split(',') + .map(|s| s.trim().parse()) + .collect::, _>>()?; + + Ok(Self { + vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, + validators_to_delete, + vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + }) + } +} 
+ +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { + let config = DeleteConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? { + Ok(()) + } else { + run(config).await + } +} + +async fn run<'a>(config: DeleteConfig) -> Result<(), String> { + let DeleteConfig { + vc_url, + vc_token_path, + validators_to_delete, + } = config; + + let (http_client, validators) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + + for validator_to_delete in &validators_to_delete { + if !validators + .iter() + .any(|validator| &validator.validating_pubkey == validator_to_delete) + { + return Err(format!("Validator {} doesn't exist", validator_to_delete)); + } + } + + let delete_request = DeleteKeystoresRequest { + pubkeys: validators_to_delete.clone(), + }; + + let responses = http_client + .delete_keystores(&delete_request) + .await + .map_err(|e| format!("Error deleting keystore {}", e))? + .data; + + let mut error = false; + for (validator_to_delete, response) in validators_to_delete.iter().zip(responses.iter()) { + if response.status == DeleteKeystoreStatus::Error + || response.status == DeleteKeystoreStatus::NotFound + || response.status == DeleteKeystoreStatus::NotActive + { + error = true; + eprintln!( + "Problem with removing validator {:?}, status: {:?}", + validator_to_delete, response.status + ); + } + } + if error { + return Err("Problem with removing one or more validators".to_string()); + } + + eprintln!("Validator(s) deleted"); + Ok(()) +} + +#[cfg(not(debug_assertions))] +#[cfg(test)] +mod test { + use std::{ + fs::{self, File}, + io::Write, + str::FromStr, + }; + + use super::*; + use crate::{ + common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, + }; + use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + + struct TestBuilder { + delete_config: Option, + src_import_builder: Option, + http_config: HttpConfig, + vc_token: Option, + 
validators: Vec, + } + + impl TestBuilder { + async fn new() -> Self { + Self { + delete_config: None, + src_import_builder: None, + http_config: ApiTester::default_http_config(), + vc_token: None, + validators: vec![], + } + } + + async fn with_validators( + mut self, + count: u32, + first_index: u32, + indices_of_validators_to_delete: Vec, + ) -> Self { + let builder = ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + + self.vc_token = + Some(fs::read_to_string(builder.get_import_config().vc_token_path).unwrap()); + + let local_validators: Vec = { + let contents = + fs::read_to_string(builder.get_import_config().validators_file_path.unwrap()) + .unwrap(); + serde_json::from_str(&contents).unwrap() + }; + + let import_config = builder.get_import_config(); + + let validators_to_delete = indices_of_validators_to_delete + .iter() + .map(|&index| { + PublicKeyBytes::from_str( + format!("0x{}", local_validators[index].voting_keystore.pubkey()).as_str(), + ) + .unwrap() + }) + .collect(); + + self.delete_config = Some(DeleteConfig { + vc_url: import_config.vc_url, + vc_token_path: import_config.vc_token_path, + validators_to_delete, + }); + + self.validators = local_validators.clone(); + self.src_import_builder = Some(builder); + self + } + + pub async fn run_test(self) -> TestResult { + let import_builder = self.src_import_builder.unwrap(); + let import_test_result = import_builder.run_test().await; + assert!(import_test_result.result.is_ok()); + + let path = self.delete_config.clone().unwrap().vc_token_path; + let url = self.delete_config.clone().unwrap().vc_url; + let parent = path.parent().unwrap(); + + fs::create_dir_all(parent).expect("Was not able to create parent directory"); + + File::options() + .write(true) + .read(true) + .create(true) + .truncate(true) + .open(path.clone()) + .unwrap() + .write_all(self.vc_token.clone().unwrap().as_bytes()) + .unwrap(); + + let result = 
run(self.delete_config.clone().unwrap()).await; + + if result.is_ok() { + let (_, list_keystores_response) = vc_http_client(url, path.clone()).await.unwrap(); + + // The remaining number of active keystores (left) = Total validators - Deleted validators (right) + assert_eq!( + list_keystores_response.len(), + self.validators.len() + - self + .delete_config + .clone() + .unwrap() + .validators_to_delete + .len() + ); + + // Check the remaining validator keys are not in validators_to_delete + assert!(list_keystores_response.iter().all(|keystore| { + !self + .delete_config + .clone() + .unwrap() + .validators_to_delete + .contains(&keystore.validating_pubkey) + })); + + return TestResult { result: Ok(()) }; + } + + TestResult { + result: Err(result.unwrap_err()), + } + } + } + + #[must_use] + struct TestResult { + result: Result<(), String>, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + } + #[tokio::test] + async fn delete_multiple_validators() { + TestBuilder::new() + .await + .with_validators(3, 0, vec![0, 1, 2]) + .await + .run_test() + .await + .assert_ok(); + } +} diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index f193e8d0fb..6065ecb603 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -1,16 +1,28 @@ use super::common::*; use crate::DumpConfig; +use account_utils::{eth2_keystore::Keystore, ZeroizeString}; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; +use derivative::Derivative; +use eth2::lighthouse_vc::types::KeystoreJsonStr; use eth2::{lighthouse_vc::std_types::ImportKeystoreStatus, SensitiveUrl}; use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; +use types::Address; pub const CMD: &str = "import"; pub const VALIDATORS_FILE_FLAG: &str = "validators-file"; +pub const KEYSTORE_FILE_FLAG: &str = "keystore-file"; pub const VC_URL_FLAG: &str = "vc-url"; 
pub const VC_TOKEN_FLAG: &str = "vc-token"; +pub const PASSWORD: &str = "password"; +pub const FEE_RECIPIENT: &str = "suggested-fee-recipient"; +pub const GAS_LIMIT: &str = "gas-limit"; +pub const BUILDER_PROPOSALS: &str = "builder-proposals"; +pub const BUILDER_BOOST_FACTOR: &str = "builder-boost-factor"; +pub const PREFER_BUILDER_PROPOSALS: &str = "prefer-builder-proposals"; +pub const ENABLED: &str = "enabled"; pub const DETECTED_DUPLICATE_MESSAGE: &str = "Duplicate validator detected!"; @@ -21,15 +33,6 @@ pub fn cli_app() -> Command { are defined in a JSON file which can be generated using the \"create-validators\" \ command.", ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(VALIDATORS_FILE_FLAG) .long(VALIDATORS_FILE_FLAG) @@ -39,19 +42,32 @@ pub fn cli_app() -> Command { imported to the validator client. This file is usually named \ \"validators.json\".", ) - .required(true) .action(ArgAction::Set) - .display_order(0), + .display_order(0) + .required_unless_present("keystore-file") + .conflicts_with("keystore-file"), + ) + .arg( + Arg::new(KEYSTORE_FILE_FLAG) + .long(KEYSTORE_FILE_FLAG) + .value_name("PATH_TO_KEYSTORE_FILE") + .help( + "The path to a keystore JSON file to be \ + imported to the validator client. This file is usually created \ + using staking-deposit-cli or ethstaker-deposit-cli", + ) + .action(ArgAction::Set) + .display_order(0) + .conflicts_with("validators-file") + .required_unless_present("validators-file") + .requires(PASSWORD), ) .arg( Arg::new(VC_URL_FLAG) .long(VC_URL_FLAG) .value_name("HTTP_ADDRESS") .help( - "A HTTP(S) address of a validator client using the keymanager-API. 
\ - If this value is not supplied then a 'dry run' will be conducted where \ - no changes are made to the validator client.", - ) + "A HTTP(S) address of a validator client using the keymanager-API.") .default_value("http://localhost:5062") .requires(VC_TOKEN_FLAG) .action(ArgAction::Set) @@ -80,29 +96,111 @@ pub fn cli_app() -> Command { ) .display_order(0), ) + .arg( + Arg::new(PASSWORD) + .long(PASSWORD) + .value_name("STRING") + .help("Password of the keystore file.") + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(FEE_RECIPIENT) + .long(FEE_RECIPIENT) + .value_name("ETH1_ADDRESS") + .help("When provided, the imported validator will use the suggested fee recipient. Omit this flag to use the default value from the VC.") + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(GAS_LIMIT) + .long(GAS_LIMIT) + .value_name("UINT64") + .help("When provided, the imported validator will use this gas limit. 
It is recommended \ + to leave this as the default value by not specifying this flag.",) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(BUILDER_PROPOSALS) + .long(BUILDER_PROPOSALS) + .help("When provided, the imported validator will attempt to create \ + blocks via builder rather than the local EL.",) + .value_parser(["true","false"]) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(BUILDER_BOOST_FACTOR) + .long(BUILDER_BOOST_FACTOR) + .value_name("UINT64") + .help("When provided, the imported validator will use this \ + percentage multiplier to apply to the builder's payload value \ + when choosing between a builder payload header and payload from \ + the local execution node.",) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) + .arg( + Arg::new(PREFER_BUILDER_PROPOSALS) + .long(PREFER_BUILDER_PROPOSALS) + .help("When provided, the imported validator will always prefer blocks \ + constructed by builders, regardless of payload value.",) + .value_parser(["true","false"]) + .action(ArgAction::Set) + .display_order(0) + .requires(KEYSTORE_FILE_FLAG), + ) } -#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Serialize, Deserialize, Derivative)] +#[derivative(Debug)] pub struct ImportConfig { - pub validators_file_path: PathBuf, + pub validators_file_path: Option, + pub keystore_file_path: Option, pub vc_url: SensitiveUrl, pub vc_token_path: PathBuf, pub ignore_duplicates: bool, + #[derivative(Debug = "ignore")] + pub password: Option, + pub fee_recipient: Option
, + pub gas_limit: Option, + pub builder_proposals: Option, + pub builder_boost_factor: Option, + pub prefer_builder_proposals: Option, + pub enabled: Option, } impl ImportConfig { fn from_cli(matches: &ArgMatches) -> Result { Ok(Self { - validators_file_path: clap_utils::parse_required(matches, VALIDATORS_FILE_FLAG)?, + validators_file_path: clap_utils::parse_optional(matches, VALIDATORS_FILE_FLAG)?, + keystore_file_path: clap_utils::parse_optional(matches, KEYSTORE_FILE_FLAG)?, vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, ignore_duplicates: matches.get_flag(IGNORE_DUPLICATES_FLAG), + password: clap_utils::parse_optional(matches, PASSWORD)?, + fee_recipient: clap_utils::parse_optional(matches, FEE_RECIPIENT)?, + gas_limit: clap_utils::parse_optional(matches, GAS_LIMIT)?, + builder_proposals: clap_utils::parse_optional(matches, BUILDER_PROPOSALS)?, + builder_boost_factor: clap_utils::parse_optional(matches, BUILDER_BOOST_FACTOR)?, + prefer_builder_proposals: clap_utils::parse_optional( + matches, + PREFER_BUILDER_PROPOSALS, + )?, + enabled: clap_utils::parse_optional(matches, ENABLED)?, }) } } pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { let config = ImportConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? 
{ Ok(()) } else { @@ -113,27 +211,61 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() async fn run<'a>(config: ImportConfig) -> Result<(), String> { let ImportConfig { validators_file_path, + keystore_file_path, vc_url, vc_token_path, ignore_duplicates, + password, + fee_recipient, + gas_limit, + builder_proposals, + builder_boost_factor, + prefer_builder_proposals, + enabled, } = config; - if !validators_file_path.exists() { - return Err(format!("Unable to find file at {:?}", validators_file_path)); - } + let validators: Vec = + if let Some(validators_format_path) = &validators_file_path { + if !validators_format_path.exists() { + return Err(format!( + "Unable to find file at {:?}", + validators_format_path + )); + } - let validators_file = fs::OpenOptions::new() - .read(true) - .create(false) - .open(&validators_file_path) - .map_err(|e| format!("Unable to open {:?}: {:?}", validators_file_path, e))?; - let validators: Vec = serde_json::from_reader(&validators_file) - .map_err(|e| { - format!( - "Unable to parse JSON in {:?}: {:?}", - validators_file_path, e - ) - })?; + let validators_file = fs::OpenOptions::new() + .read(true) + .create(false) + .open(validators_format_path) + .map_err(|e| format!("Unable to open {:?}: {:?}", validators_format_path, e))?; + + serde_json::from_reader(&validators_file).map_err(|e| { + format!( + "Unable to parse JSON in {:?}: {:?}", + validators_format_path, e + ) + })? 
+ } else if let Some(keystore_format_path) = &keystore_file_path { + vec![ValidatorSpecification { + voting_keystore: KeystoreJsonStr( + Keystore::from_json_file(keystore_format_path).map_err(|e| format!("{e:?}"))?, + ), + voting_keystore_password: password.ok_or_else(|| { + "The --password flag is required to supply the keystore password".to_string() + })?, + slashing_protection: None, + fee_recipient, + gas_limit, + builder_proposals, + builder_boost_factor, + prefer_builder_proposals, + enabled, + }] + } else { + return Err(format!( + "One of the flag --{VALIDATORS_FILE_FLAG} or --{KEYSTORE_FILE_FLAG} is required." + )); + }; let count = validators.len(); @@ -250,7 +382,10 @@ async fn run<'a>(config: ImportConfig) -> Result<(), String> { pub mod tests { use super::*; use crate::create_validators::tests::TestBuilder as CreateTestBuilder; - use std::fs; + use std::{ + fs::{self, File}, + str::FromStr, + }; use tempfile::{tempdir, TempDir}; use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; @@ -279,10 +414,18 @@ pub mod tests { Self { import_config: ImportConfig { // This field will be overwritten later on. 
- validators_file_path: dir.path().into(), + validators_file_path: Some(dir.path().into()), + keystore_file_path: Some(dir.path().into()), vc_url: vc.url.clone(), vc_token_path, ignore_duplicates: false, + password: Some(ZeroizeString::from_str("password").unwrap()), + fee_recipient: None, + builder_boost_factor: None, + gas_limit: None, + builder_proposals: None, + enabled: None, + prefer_builder_proposals: None, }, vc, create_dir: None, @@ -295,6 +438,10 @@ pub mod tests { self } + pub fn get_import_config(&self) -> ImportConfig { + self.import_config.clone() + } + pub async fn create_validators(mut self, count: u32, first_index: u32) -> Self { let create_result = CreateTestBuilder::default() .mutate_config(|config| { @@ -307,7 +454,55 @@ pub mod tests { create_result.result.is_ok(), "precondition: validators are created" ); - self.import_config.validators_file_path = create_result.validators_file_path(); + self.import_config.validators_file_path = Some(create_result.validators_file_path()); + self.create_dir = Some(create_result.output_dir); + self + } + + // Keystore JSON requires a different format when creating valdiators + pub async fn create_validators_keystore_format( + mut self, + count: u32, + first_index: u32, + ) -> Self { + let create_result = CreateTestBuilder::default() + .mutate_config(|config| { + config.count = count; + config.first_index = first_index; + }) + .run_test() + .await; + assert!( + create_result.result.is_ok(), + "precondition: validators are created" + ); + + let validators_file_path = create_result.validators_file_path(); + + let validators_file = fs::OpenOptions::new() + .read(true) + .create(false) + .open(&validators_file_path) + .map_err(|e| format!("Unable to open {:?}: {:?}", validators_file_path, e)) + .unwrap(); + + let validators: Vec = serde_json::from_reader(&validators_file) + .map_err(|e| { + format!( + "Unable to parse JSON in {:?}: {:?}", + validators_file_path, e + ) + }) + .unwrap(); + + let validator = 
&validators[0]; + let validator_json = validator.voting_keystore.0.clone(); + + let keystore_file = File::create(&validators_file_path).unwrap(); + let _ = validator_json.to_json_writer(keystore_file); + + self.import_config.keystore_file_path = Some(create_result.validators_file_path()); + self.import_config.password = Some(validator.voting_keystore_password.clone()); self.create_dir = Some(create_result.output_dir); self } @@ -327,7 +522,8 @@ pub mod tests { let local_validators: Vec = { let contents = - fs::read_to_string(&self.import_config.validators_file_path).unwrap(); + fs::read_to_string(&self.import_config.validators_file_path.unwrap()) + .unwrap(); serde_json::from_str(&contents).unwrap() }; let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; @@ -355,6 +551,39 @@ pub mod tests { vc: self.vc, } } + + pub async fn run_test_keystore_format(self) -> TestResult { + let result = run(self.import_config.clone()).await; + + if result.is_ok() { + self.vc.ensure_key_cache_consistency().await; + + let local_keystore: Keystore = + Keystore::from_json_file(&self.import_config.keystore_file_path.unwrap()) + .unwrap(); + + let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; + + assert_eq!( + 1, + list_keystores_response.len(), + "vc should have exactly the number of validators imported" + ); + + let local_pubkey = local_keystore.public_key().unwrap().into(); + let remote_validator = list_keystores_response + .iter() + .find(|validator| validator.validating_pubkey == local_pubkey) + .expect("validator must exist on VC"); + assert_eq!(&remote_validator.derivation_path, &local_keystore.path()); + assert_eq!(remote_validator.readonly, Some(false)); + } + + TestResult { + result, + vc: self.vc, + } + } } #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. 
@@ -445,4 +674,66 @@ pub mod tests { .await .assert_ok(); } + + #[tokio::test] + async fn create_one_validator_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + // Set validators_file_path to None so that keystore_file_path is used for tests with the keystore format + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 0) + .await + .run_test_keystore_format() + .await + .assert_ok(); + } + + #[tokio::test] + async fn create_one_validator_with_offset_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 42) + .await + .run_test_keystore_format() + .await + .assert_ok(); + } + + #[tokio::test] + async fn import_duplicates_when_disallowed_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 0) + .await + .import_validators_without_checks() + .await + .run_test_keystore_format() + .await + .assert_err_contains("DuplicateValidator"); + } + + #[tokio::test] + async fn import_duplicates_when_allowed_keystore_format() { + TestBuilder::new() + .await + .mutate_import_config(|config| { + config.ignore_duplicates = true; + config.validators_file_path = None; + }) + .create_validators_keystore_format(1, 0) + .await + .import_validators_without_checks() + .await + .run_test_keystore_format() + .await + .assert_ok(); + } } diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs index 222dd7076d..8e43cd5977 100644 --- a/validator_manager/src/lib.rs +++ b/validator_manager/src/lib.rs @@ -8,7 +8,9 @@ use types::EthSpec; pub mod common; pub mod create_validators; +pub mod delete_validators; pub mod import_validators; +pub mod list_validators; pub mod move_validators; pub const CMD: &str = "validator_manager"; @@ -51,11 +53,14 @@ pub fn cli_app() -> Command { .help("Prints 
help information") .action(ArgAction::HelpLong) .display_order(0) - .help_heading(FLAG_HEADER), + .help_heading(FLAG_HEADER) + .global(true), ) .subcommand(create_validators::cli_app()) .subcommand(import_validators::cli_app()) .subcommand(move_validators::cli_app()) + .subcommand(list_validators::cli_app()) + .subcommand(delete_validators::cli_app()) } /// Run the account manager, returning an error if the operation did not succeed. @@ -83,6 +88,13 @@ pub fn run(matches: &ArgMatches, env: Environment) -> Result<(), Some((move_validators::CMD, matches)) => { move_validators::cli_run(matches, dump_config).await } + Some((list_validators::CMD, matches)) => { + list_validators::cli_run(matches, dump_config).await + } + Some((delete_validators::CMD, matches)) => { + delete_validators::cli_run(matches, dump_config).await + } + Some(("", _)) => Err("No command supplied. See --help.".to_string()), Some((unknown, _)) => Err(format!( "{} is not a valid {} command. See --help.", unknown, CMD diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs new file mode 100644 index 0000000000..7df85a7eb9 --- /dev/null +++ b/validator_manager/src/list_validators.rs @@ -0,0 +1,201 @@ +use clap::{Arg, ArgAction, ArgMatches, Command}; +use eth2::lighthouse_vc::types::SingleKeystoreResponse; +use eth2::SensitiveUrl; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; + +use crate::{common::vc_http_client, DumpConfig}; + +pub const CMD: &str = "list"; +pub const VC_URL_FLAG: &str = "vc-url"; +pub const VC_TOKEN_FLAG: &str = "vc-token"; + +pub fn cli_app() -> Command { + Command::new(CMD) + .about("Lists all validators in a validator client using the HTTP API.") + .arg( + Arg::new(VC_URL_FLAG) + .long(VC_URL_FLAG) + .value_name("HTTP_ADDRESS") + .help("A HTTP(S) address of a validator client using the keymanager-API.") + .default_value("http://localhost:5062") + .requires(VC_TOKEN_FLAG) + .action(ArgAction::Set) + .display_order(0), + ) + 
.arg( + Arg::new(VC_TOKEN_FLAG) + .long(VC_TOKEN_FLAG) + .value_name("PATH") + .help("The file containing a token required by the validator client.") + .action(ArgAction::Set) + .display_order(0), + ) +} + +#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] +pub struct ListConfig { + pub vc_url: SensitiveUrl, + pub vc_token_path: PathBuf, +} + +impl ListConfig { + fn from_cli(matches: &ArgMatches) -> Result { + Ok(Self { + vc_token_path: clap_utils::parse_required(matches, VC_TOKEN_FLAG)?, + vc_url: clap_utils::parse_required(matches, VC_URL_FLAG)?, + }) + } +} + +pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<(), String> { + let config = ListConfig::from_cli(matches)?; + if dump_config.should_exit_early(&config)? { + Ok(()) + } else { + run(config).await?; + Ok(()) + } +} + +async fn run<'a>(config: ListConfig) -> Result, String> { + let ListConfig { + vc_url, + vc_token_path, + } = config; + + let (_, validators) = vc_http_client(vc_url.clone(), &vc_token_path).await?; + + println!("List of validators ({}):", validators.len()); + + for validator in &validators { + println!("{}", validator.validating_pubkey); + } + + Ok(validators) +} + +#[cfg(not(debug_assertions))] +#[cfg(test)] +mod test { + use std::{ + fs::{self, File}, + io::Write, + }; + + use super::*; + use crate::{ + common::ValidatorSpecification, import_validators::tests::TestBuilder as ImportTestBuilder, + }; + use validator_client::http_api::{test_utils::ApiTester, Config as HttpConfig}; + + struct TestBuilder { + list_config: Option, + src_import_builder: Option, + http_config: HttpConfig, + vc_token: Option, + validators: Vec, + } + + impl TestBuilder { + async fn new() -> Self { + Self { + list_config: None, + src_import_builder: None, + http_config: ApiTester::default_http_config(), + vc_token: None, + validators: vec![], + } + } + + async fn with_validators(mut self, count: u32, first_index: u32) -> Self { + let builder = 
ImportTestBuilder::new_with_http_config(self.http_config.clone()) + .await + .create_validators(count, first_index) + .await; + self.list_config = Some(ListConfig { + vc_url: builder.get_import_config().vc_url, + vc_token_path: builder.get_import_config().vc_token_path, + }); + + self.vc_token = + Some(fs::read_to_string(builder.get_import_config().vc_token_path).unwrap()); + + let local_validators: Vec = { + let contents = + fs::read_to_string(builder.get_import_config().validators_file_path.unwrap()) + .unwrap(); + serde_json::from_str(&contents).unwrap() + }; + + self.validators = local_validators.clone(); + self.src_import_builder = Some(builder); + self + } + + pub async fn run_test(self) -> TestResult { + let import_test_result = self.src_import_builder.unwrap().run_test().await; + assert!(import_test_result.result.is_ok()); + + let path = self.list_config.clone().unwrap().vc_token_path; + let parent = path.parent().unwrap(); + + fs::create_dir_all(parent).expect("Was not able to create parent directory"); + + File::options() + .write(true) + .read(true) + .create(true) + .truncate(true) + .open(path) + .unwrap() + .write_all(self.vc_token.clone().unwrap().as_bytes()) + .unwrap(); + + let result = run(self.list_config.clone().unwrap()).await; + + if result.is_ok() { + let result_ref = result.as_ref().unwrap(); + + for local_validator in &self.validators { + let local_keystore = &local_validator.voting_keystore.0; + let local_pubkey = local_keystore.public_key().unwrap(); + assert!( + result_ref + .iter() + .any(|validator| validator.validating_pubkey + == local_pubkey.clone().into()), + "local validator pubkey not found in result" + ); + } + + return TestResult { result: Ok(()) }; + } + + TestResult { + result: Err(result.unwrap_err()), + } + } + } + + #[must_use] // Use the `assert_ok` or `assert_err` fns to "use" this value. 
+ struct TestResult { + result: Result<(), String>, + } + + impl TestResult { + fn assert_ok(self) { + assert_eq!(self.result, Ok(())) + } + } + #[tokio::test] + async fn list_all_validators() { + TestBuilder::new() + .await + .with_validators(3, 0) + .await + .run_test() + .await + .assert_ok(); + } +} diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 91bc2b0ef8..7651917ea9 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -2,7 +2,6 @@ use super::common::*; use crate::DumpConfig; use account_utils::{read_password_from_user, ZeroizeString}; use clap::{Arg, ArgAction, ArgMatches, Command}; -use clap_utils::FLAG_HEADER; use eth2::{ lighthouse_vc::{ std_types::{ @@ -75,15 +74,6 @@ pub fn cli_app() -> Command { command. This command only supports validators signing via a keystore on the local \ file system (i.e., not Web3Signer validators).", ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .arg( Arg::new(SRC_VC_URL_FLAG) .long(SRC_VC_URL_FLAG)