diff --git a/.cargo/config.toml b/.cargo/config.toml index dac0163003..a408305c4d 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,4 +1,3 @@ [env] # Set the number of arenas to 16 when using jemalloc. JEMALLOC_SYS_WITH_MALLOC_CONF = "abort_conf:true,narenas:16" - diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 031a88b03c..e9db3b6ab1 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -11,7 +11,7 @@ concurrency: jobs: build-and-upload-to-s3: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 0ee9dbb622..0f91c86617 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -65,8 +65,7 @@ jobs: - name: Install dependencies run: apt update && apt install -y cmake libclang-dev - name: Check for deadlocks - run: | - cargo lockbud -k deadlock -b -l tokio_util + run: ./scripts/ci/check-lockbud.sh target-branch-check: name: target-branch-check diff --git a/Cargo.lock b/Cargo.lock index 77c78f0f00..cac5905593 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "Inflector" @@ -46,7 +46,7 @@ dependencies = [ "eth2_keystore", "eth2_wallet", "filesystem", - "rand", + "rand 0.8.5", "regex", "rpassword", "serde", @@ -135,7 +135,7 @@ dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-consensus" @@ -204,9 +204,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.12" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425" +checksum = "bc1360603efdfba91151e623f13a4f4d3dc4af4adc1cbd90bf37c81e84db4c77" dependencies = [ "alloy-rlp", "arbitrary", @@ -216,19 +216,18 @@ dependencies = [ "derive_arbitrary", "derive_more 1.0.0", "foldhash", - "getrandom", - "hashbrown 0.15.1", - "hex-literal", - "indexmap 2.6.0", + "getrandom 0.2.15", + "hashbrown 0.15.2", + "indexmap 2.7.1", "itoa", "k256 0.13.4", "keccak-asm", "paste", "proptest", "proptest-derive", - "rand", + "rand 0.8.5", "ruint", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "sha3 0.10.8", "tiny-keccak", @@ -236,9 +235,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" +checksum = "3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -247,13 +246,13 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.9" +version = "0.3.11" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" +checksum = "a40e1ef334153322fd878d07e86af7a529bcb86b2439525920a88eba87bcf943" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -318,19 +317,20 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "3.0.6" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ "anstyle", + "once_cell", "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arbitrary" @@ -467,7 +467,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -477,7 +477,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", ] [[package]] @@ -522,7 +522,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", "synstructure", ] @@ -534,7 +534,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -543,6 +543,16 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "async-channel" version = "1.9.0" @@ -567,7 +577,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.41", + "rustix 0.38.44", "slab", "tracing", "windows-sys 0.59.0", @@ -579,20 +589,31 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "pin-project-lite", ] [[package]] -name = "async-trait" -version = "0.1.83" +name = "async-recursion" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", +] + +[[package]] +name = "async-trait" +version = "0.1.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", ] [[package]] @@ -619,6 +640,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "attohttpc" version = "0.24.1" @@ -643,13 +670,13 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -668,10 +695,10 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.1", + "hyper 1.6.0", "hyper-util", "itoa", "matchit", @@ -701,7 +728,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", "mime", @@ -804,7 +831,7 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "proto_array", - "rand", + "rand 0.8.5", "rayon", "safe_arith", "sensitive_url", @@ -848,7 +875,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper 1.5.1", + "hyper 1.6.0", "lighthouse_network", "monitoring_api", "node_test_rig", @@ -867,9 +894,11 @@ dependencies = [ name = "beacon_node_fallback" version = "0.1.0" dependencies = [ + "clap", "eth2", "futures", "itertools 0.10.5", + "logging", "serde", "slot_clock", "strum", @@ -878,6 +907,7 @@ dependencies = [ "tracing", "types", "validator_metrics", + "validator_test_rig", ] [[package]] @@ -917,7 +947,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -930,7 +960,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.89", + "syn 2.0.98", "which", ] @@ -957,9 +987,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = 
"8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "bitvec" @@ -1031,7 +1061,7 @@ dependencies = [ "ethereum_ssz", "fixed_bytes", "hex", - "rand", + "rand 0.8.5", "safe_arith", "serde", "tree_hash", @@ -1061,7 +1091,7 @@ dependencies = [ "ff 0.13.0", "group 0.13.0", "pairing", - "rand_core", + "rand_core 0.6.4", "serde", "subtle", ] @@ -1119,17 +1149,19 @@ name = "builder_client" version = "0.1.0" dependencies = [ "eth2", + "ethereum_ssz", "lighthouse_version", "reqwest", "sensitive_url", "serde", + "serde_json", ] [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "byte-slice-cast" @@ -1145,9 +1177,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" dependencies = [ "serde", ] @@ -1199,9 +1231,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" dependencies = [ "serde", ] @@ -1214,7 +1246,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.23", + "semver 1.0.25", "serde", "serde_json", "thiserror 1.0.69", @@ -1228,9 +1260,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" 
[[package]] name = "cc" -version = "1.2.1" +version = "1.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" +checksum = "e4730490333d58093109dc02c23174c3f4d490998c3fed3cc8e82d57afedb9cf" dependencies = [ "jobserver", "libc", @@ -1284,9 +1316,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1354,9 +1386,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" dependencies = [ "clap_builder", "clap_derive", @@ -1364,9 +1396,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" dependencies = [ "anstream", "anstyle", @@ -1377,21 +1409,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] name = "clap_lex" -version = "0.7.3" +version = "0.7.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "clap_utils" @@ -1452,9 +1484,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.51" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" dependencies = [ "cc", ] @@ -1465,6 +1497,16 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +[[package]] +name = "colored" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" +dependencies = [ + "lazy_static", + "windows-sys 0.59.0", +] + [[package]] name = "compare_fields" version = "0.2.0" @@ -1492,9 +1534,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487981fa1af147182687064d0a2c336586d337a606595ced9ffb0c685c250c73" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" dependencies = [ "cfg-if", "cpufeatures", @@ -1548,18 +1590,18 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] [[package]] name = "crate_crypto_internal_eth_kzg_bls12_381" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a23be5253f1bd7fd411721a58712308c4747d0a41d040bbf8ebb78d52909a480" +checksum = "48603155907d588e487aea229f61a28d9a918c95c9aa987055ba29502225810b" dependencies = [ "blst", "blstrs", @@ -1571,9 +1613,9 @@ dependencies = [ [[package]] name = "crate_crypto_internal_eth_kzg_erasure_codes" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2067ce20ef380ff33a93ce0af62bea22d35531b7f3586224d8d5176ec6cf578" +checksum = "cdf616e4b4f1799191bb1e70b8a29f65e95ab5d74c59972a34998de488d01efd" dependencies = [ "crate_crypto_internal_eth_kzg_bls12_381", "crate_crypto_internal_eth_kzg_polynomial", @@ -1581,24 +1623,24 @@ dependencies = [ [[package]] name = "crate_crypto_internal_eth_kzg_maybe_rayon" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558f50324ff016e5fe93113c78a72776d790d52f244ae9602a8013a67a189b66" +checksum = "f1ddd0330f34f0b92a9f0b29bc3f8494b30d596ab8b951233ec90b2d72ab132c" [[package]] name = "crate_crypto_internal_eth_kzg_polynomial" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e051c4f5aa5696bd7c504930485436ec62bf14f30a4c2d78504f3f8ec6a3daf" +checksum = "7488314261926373e1c20121c404fabf5b57ca09f48eddc7fef38be1df79a006" dependencies = [ "crate_crypto_internal_eth_kzg_bls12_381", ] [[package]] name = "crate_crypto_kzg_multi_open_fk20" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66ed6bf8993d9f3b361da4ed38f067503e08c0b948af0d6f4bb941dd647c0f2c" +checksum = "d24efdb64e7518848f11069dd9de23bd04455146a9fd5486345d99ed8bfdb049" dependencies = [ "crate_crypto_internal_eth_kzg_bls12_381", "crate_crypto_internal_eth_kzg_maybe_rayon", @@ -1654,18 +1696,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1682,15 +1724,15 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" [[package]] name = "crypto-bigint" @@ -1699,7 +1741,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -1711,7 +1753,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -1723,7 +1765,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - 
"rand_core", + "rand_core 0.6.4", "typenum", ] @@ -1799,7 +1841,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -1847,7 +1889,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -1869,7 +1911,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -1900,15 +1942,15 @@ checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" [[package]] name = "data-encoding" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" [[package]] name = "data-encoding-macro" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" +checksum = "5b16d9d0d88a5273d830dac8b78ceb217ffc9b1d5404e5597a3542515329405b" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1916,12 +1958,12 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" +checksum = "1145d32e826a7748b69ee8fc62d3e6355ff7f1051df53141e7048162fc90481b" dependencies = [ "data-encoding", - "syn 1.0.109", + "syn 2.0.98", ] [[package]] @@ -2035,7 +2077,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -2048,7 +2090,7 @@ dependencies = [ "proc-macro2", 
"quote", "rustc_version 0.4.1", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -2068,17 +2110,17 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", "unicode-xid", ] [[package]] name = "diesel" -version = "2.2.5" +version = "2.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf9649c05e0a9dbd6d0b0b8301db5182b972d0fd02f0a7c6736cf632d7c0fd5" +checksum = "04001f23ba8843dc315804fa324000376084dfb1c30794ff68dd279e6e5696d5" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "byteorder", "diesel_derives", "itoa", @@ -2096,7 +2138,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -2116,7 +2158,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -2192,9 +2234,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "898d136ecb64116ec68aecf14d889bd30f8b1fe0c19e262953f7388dbe77052e" +checksum = "c4b4e7798d2ff74e29cee344dc490af947ae657d6ab5273dde35d58ce06a4d71" dependencies = [ "aes 0.8.4", "aes-gcm", @@ -2214,7 +2256,7 @@ dependencies = [ "more-asserts", "multiaddr", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "smallvec", "socket2", "tokio", @@ -2231,7 +2273,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -2254,16 +2296,16 @@ dependencies = [ [[package]] name = "dsl_auto_type" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d9abe6314103864cc2d8901b7ae224e0ab1a103a0a416661b4097b0779b607" +checksum 
= "139ae9aca7527f85f26dd76483eb38533fd84bd571065da1739656ef71c5ff5b" dependencies = [ "darling 0.20.10", "either", "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -2322,7 +2364,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2 0.10.8", "subtle", @@ -2380,7 +2422,7 @@ dependencies = [ "ff 0.12.1", "generic-array", "group 0.12.1", - "rand_core", + "rand_core 0.6.4", "sec1 0.3.0", "subtle", "zeroize", @@ -2400,7 +2442,7 @@ dependencies = [ "group 0.13.0", "pem-rfc7468", "pkcs8 0.10.2", - "rand_core", + "rand_core 0.6.4", "sec1 0.7.3", "subtle", "zeroize", @@ -2428,7 +2470,7 @@ dependencies = [ "hex", "k256 0.13.4", "log", - "rand", + "rand 0.8.5", "serde", "sha3 0.10.8", "zeroize", @@ -2443,7 +2485,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -2507,12 +2549,12 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2563,6 +2605,7 @@ name = "eth2" version = "0.1.0" dependencies = [ "derivative", + "either", "enr", "eth2_keystore", "ethereum_serde_utils", @@ -2630,7 +2673,7 @@ dependencies = [ "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand", + "rand 0.8.5", "scrypt", "serde", "serde_json", @@ -2638,7 +2681,7 @@ dependencies = [ "sha2 0.9.9", "tempfile", "unicode-normalization", - "uuid", + "uuid 0.8.2", "zeroize", ] @@ -2672,13 +2715,13 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand", + "rand 0.8.5", "serde", "serde_json", "serde_repr", "tempfile", "tiny-bip39", - "uuid", + 
"uuid 0.8.2", ] [[package]] @@ -2824,7 +2867,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -2857,7 +2900,7 @@ dependencies = [ "dunce", "ethers-core", "eyre", - "getrandom", + "getrandom 0.2.15", "hex", "proc-macro2", "quote", @@ -2903,7 +2946,7 @@ dependencies = [ "k256 0.11.6", "once_cell", "open-fastrlp", - "rand", + "rand 0.8.5", "rlp", "rlp-derive", "serde", @@ -2928,7 +2971,7 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", - "getrandom", + "getrandom 0.2.15", "hashers", "hex", "http 0.2.12", @@ -2958,9 +3001,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.3.1" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", @@ -2969,11 +3012,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "pin-project-lite", ] @@ -3039,7 +3082,7 @@ dependencies = [ "metrics", "parking_lot 0.12.3", "pretty_reqwest_error", - "rand", + "rand 0.8.5", "reqwest", "sensitive_url", "serde", @@ -3087,9 +3130,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" +checksum = 
"37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fastrlp" @@ -3102,6 +3145,17 @@ dependencies = [ "bytes", ] +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "fdlimit" version = "0.3.0" @@ -3118,7 +3172,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3129,7 +3183,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ "bitvec 1.0.1", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3170,7 +3224,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -3182,7 +3236,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -3214,9 +3268,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "foreign-types" @@ -3341,9 +3395,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] 
name = "futures-lite" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" dependencies = [ "futures-core", "pin-project-lite", @@ -3357,7 +3411,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -3367,7 +3421,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.18", + "rustls 0.23.22", "rustls-pki-types", ] @@ -3420,6 +3474,19 @@ dependencies = [ "byteorder", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -3461,10 +3528,22 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "ghash" version = "0.5.1" @@ -3498,14 +3577,14 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gloo-timers" @@ -3532,7 +3611,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "getrandom", + "getrandom 0.2.15", "hashlink 0.9.1", "hex_fmt", "libp2p", @@ -3540,7 +3619,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "quickcheck", - "rand", + "rand 0.8.5", "regex", "serde", "sha2 0.10.8", @@ -3568,7 +3647,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3579,8 +3658,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand", - "rand_core", + "rand 0.8.5", + "rand_core 0.6.4", "rand_xorshift", "subtle", ] @@ -3597,7 +3676,26 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.6.0", + "indexmap 2.7.1", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.2.0", + "indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -3647,9 +3745,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.1" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ "allocator-api2", "equivalent", @@ -3760,12 +3858,6 @@ dependencies = [ "serde", ] -[[package]] -name = 
"hex-literal" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" - [[package]] name = "hex_fmt" version = "0.3.0" @@ -3774,10 +3866,11 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hickory-proto" -version = "0.24.1" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +checksum = "d063c0692ee669aa6d261988aa19ca5510f1cc40e4f211024f50c888499a35d7" dependencies = [ + "async-recursion", "async-trait", "cfg-if", "data-encoding", @@ -3785,12 +3878,12 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.4.0", + "idna", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "socket2", - "thiserror 1.0.69", + "thiserror 2.0.11", "tinyvec", "tokio", "tracing", @@ -3799,21 +3892,21 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.25.0-alpha.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "42bc352e4412fb657e795f79b4efcf2bd60b59ee5ca0187f3554194cd1107a27" dependencies = [ "cfg-if", "futures-util", "hickory-proto", "ipconfig", - "lru-cache", + "moka", "once_cell", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "resolv-conf", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -3869,11 +3962,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3900,9 +3993,9 @@ 
dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -3927,7 +4020,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http 1.2.0", ] [[package]] @@ -3938,7 +4031,7 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "pin-project-lite", ] @@ -3971,7 +4064,7 @@ dependencies = [ "operation_pool", "parking_lot 0.12.3", "proto_array", - "rand", + "rand 0.8.5", "safe_arith", "sensitive_url", "serde", @@ -4015,9 +4108,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -4033,15 +4126,15 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "httparse", @@ -4057,14 +4150,15 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.1" +version = "1.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", + "h2 0.4.7", + "http 1.2.0", "http-body 1.0.1", "httparse", "httpdate", @@ -4072,6 +4166,7 @@ dependencies = [ "pin-project-lite", "smallvec", "tokio", + "want", ] [[package]] @@ -4082,7 +4177,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -4095,7 +4190,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.31", + "hyper 0.14.32", "native-tls", "tokio", "tokio-native-tls", @@ -4108,13 +4203,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", + "futures-channel", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.1", + "hyper 1.6.0", "pin-project-lite", + "socket2", "tokio", "tower-service", + "tracing", ] [[package]] @@ -4255,7 +4353,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -4264,16 +4362,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - 
"unicode-normalization", -] - [[package]] name = "idna" version = "1.0.3" @@ -4325,23 +4413,46 @@ dependencies = [ "rtnetlink", "system-configuration 0.6.1", "tokio", - "windows", + "windows 0.53.0", ] [[package]] name = "igd-next" -version = "0.14.3" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" +checksum = "76b0d7d4541def58a37bf8efc559683f21edce7c82f0d866c93ac21f7e098f93" dependencies = [ "async-trait", "attohttpc", "bytes", "futures", - "http 0.2.12", - "hyper 0.14.31", + "http 1.2.0", + "http-body-util", + "hyper 1.6.0", + "hyper-util", "log", - "rand", + "rand 0.8.5", + "tokio", + "url", + "xmltree", +] + +[[package]] +name = "igd-next" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2830127baaaa55dae9aa5ee03158d5aa3687a9c2c11ce66870452580cc695df4" +dependencies = [ + "async-trait", + "attohttpc", + "bytes", + "futures", + "http 1.2.0", + "http-body-util", + "hyper 1.6.0", + "hyper-util", + "log", + "rand 0.8.5", "tokio", "url", "xmltree", @@ -4362,7 +4473,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.7.0", + "parity-scale-codec 3.6.12", ] [[package]] @@ -4400,7 +4511,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -4421,13 +4532,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.1", + 
"hashbrown 0.15.2", "serde", ] @@ -4443,7 +4554,7 @@ dependencies = [ "lockfile", "metrics", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "reqwest", "serde", "serde_json", @@ -4521,19 +4632,19 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "is-terminal" -version = "0.4.13" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" dependencies = [ "hermit-abi 0.4.0", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4571,9 +4682,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jobserver" @@ -4586,10 +4697,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -4759,9 +4871,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.164" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = 
"libflate" @@ -4789,9 +4901,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -4820,15 +4932,15 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.54.1" +version = "0.55.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" +checksum = "b72dc443ddd0254cb49a794ed6b6728400ee446a0f7ab4a07d0209ee98de20e9" dependencies = [ "bytes", "either", "futures", "futures-timer", - "getrandom", + "getrandom 0.2.15", "libp2p-allow-block-list", "libp2p-connection-limits", "libp2p-core", @@ -4847,38 +4959,36 @@ dependencies = [ "multiaddr", "pin-project", "rw-stream-sink", - "thiserror 1.0.69", + "thiserror 2.0.11", ] [[package]] name = "libp2p-allow-block-list" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" +checksum = "38944b7cb981cc93f2f0fb411ff82d0e983bd226fbcc8d559639a3a73236568b" dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "void", ] [[package]] name = "libp2p-connection-limits" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +checksum = "efe9323175a17caa8a2ed4feaf8a548eeef5e0b72d03840a0eab4bcb0210ce1c" dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "void", ] [[package]] name = "libp2p-core" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" +checksum = "193c75710ba43f7504ad8f58a62ca0615b1d7e572cb0f1780bc607252c39e9ef" dependencies = [ "either", "fnv", @@ -4892,21 +5002,19 @@ dependencies = [ "parking_lot 0.12.3", "pin-project", "quick-protobuf", - "rand", + "rand 0.8.5", "rw-stream-sink", - "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", "unsigned-varint 0.8.0", - "void", "web-time", ] [[package]] name = "libp2p-dns" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" +checksum = "1b780a1150214155b0ed1cdf09fbd2e1b0442604f9146a431d1b21d23eef7bd7" dependencies = [ "async-trait", "futures", @@ -4920,9 +5028,9 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" +checksum = "e8c06862544f02d05d62780ff590cc25a75f5c2b9df38ec7a370dcae8bb873cf" dependencies = [ "asynchronous-codec", "either", @@ -4932,13 +5040,11 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru", "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", - "void", ] [[package]] @@ -4955,7 +5061,7 @@ dependencies = [ "multihash", "p256", "quick-protobuf", - "rand", + "rand 0.8.5", "sec1 0.7.3", "sha2 0.10.8", "thiserror 1.0.69", @@ -4965,30 +5071,28 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" +checksum = "11d0ba095e1175d797540e16b62e7576846b883cb5046d4159086837b36846cc" dependencies = [ - "data-encoding", "futures", "hickory-proto", "if-watch", "libp2p-core", "libp2p-identity", 
"libp2p-swarm", - "rand", + "rand 0.8.5", "smallvec", "socket2", "tokio", "tracing", - "void", ] [[package]] name = "libp2p-metrics" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" +checksum = "2ce58c64292e87af624fcb86465e7dd8342e46a388d71e8fec0ab37ee789630a" dependencies = [ "futures", "libp2p-core", @@ -5002,9 +5106,9 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41187ab8f6c835ad864edf94224f666f636ee2d270601422c1441f739e0abccc" +checksum = "8aaa6fee3722e355443058472fc4705d78681bc2d8e447a0bdeb3fecf40cd197" dependencies = [ "asynchronous-codec", "bytes", @@ -5013,7 +5117,7 @@ dependencies = [ "libp2p-identity", "nohash-hasher", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "smallvec", "tracing", "unsigned-varint 0.8.0", @@ -5021,13 +5125,12 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.45.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" +checksum = "afcc133e0f3cea07acde6eb8a9665cb11b600bd61110b010593a0210b8153b16" dependencies = [ "asynchronous-codec", "bytes", - "curve25519-dalek", "futures", "libp2p-core", "libp2p-identity", @@ -5035,11 +5138,10 @@ dependencies = [ "multihash", "once_cell", "quick-protobuf", - "rand", - "sha2 0.10.8", + "rand 0.8.5", "snow", "static_assertions", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", "x25519-dalek", "zeroize", @@ -5047,9 +5149,9 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63d926c6be56a2489e0e7316b17fe95a70bc5c4f3e85740bb3e67c0f3c6a44" +checksum = 
"7e659439578fc6d305da8303834beb9d62f155f40e7f5b9d81c9f2b2c69d1926" dependencies = [ "asynchronous-codec", "bytes", @@ -5063,33 +5165,31 @@ dependencies = [ [[package]] name = "libp2p-quic" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" +checksum = "41432a159b00424a0abaa2c80d786cddff81055ac24aa127e0cf375f7858d880" dependencies = [ - "bytes", "futures", "futures-timer", "if-watch", "libp2p-core", "libp2p-identity", "libp2p-tls", - "parking_lot 0.12.3", "quinn", - "rand", + "rand 0.8.5", "ring 0.17.8", - "rustls 0.23.18", + "rustls 0.23.22", "socket2", - "thiserror 1.0.69", + "thiserror 2.0.11", "tokio", "tracing", ] [[package]] name = "libp2p-swarm" -version = "0.45.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" +checksum = "803399b4b6f68adb85e63ab573ac568154b193e9a640f03e0f2890eabbcb37f8" dependencies = [ "either", "fnv", @@ -5101,11 +5201,10 @@ dependencies = [ "lru", "multistream-select", "once_cell", - "rand", + "rand 0.8.5", "smallvec", "tokio", "tracing", - "void", "web-time", ] @@ -5118,21 +5217,20 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] name = "libp2p-tcp" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" +checksum = "65346fb4d36035b23fec4e7be4c320436ba53537ce9b6be1d1db1f70c905cad0" dependencies = [ "futures", "futures-timer", "if-watch", "libc", "libp2p-core", - "libp2p-identity", "socket2", "tokio", "tracing", @@ -5140,9 +5238,9 @@ dependencies = [ [[package]] name = "libp2p-tls" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" +checksum = "dcaebc1069dea12c5b86a597eaaddae0317c2c2cb9ec99dc94f82fd340f5c78b" dependencies = [ "futures", "futures-rustls", @@ -5150,39 +5248,38 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.18", + "rustls 0.23.22", "rustls-webpki 0.101.7", - "thiserror 1.0.69", + "thiserror 2.0.11", "x509-parser", "yasna", ] [[package]] name = "libp2p-upnp" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" +checksum = "d457b9ecceb66e7199f049926fad447f1f17f040e8d29d690c086b4cab8ed14a" dependencies = [ "futures", "futures-timer", - "igd-next", + "igd-next 0.15.1", "libp2p-core", "libp2p-swarm", "tokio", "tracing", - "void", ] [[package]] name = "libp2p-yamux" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" +checksum = "f15df094914eb4af272acf9adaa9e287baa269943f32ea348ba29cfb9bfc60d8" dependencies = [ "either", "futures", "libp2p-core", - "thiserror 1.0.69", + "thiserror 2.0.11", "tracing", "yamux 0.12.1", "yamux 0.13.4", @@ -5194,7 +5291,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "libc", ] @@ -5211,7 +5308,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand", + "rand 0.8.5", "serde", "sha2 0.9.9", "typenum", @@ -5259,9 +5356,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.20" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = 
"df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "pkg-config", @@ -5302,6 +5399,7 @@ dependencies = [ "slasher", "slashing_protection", "slog", + "store", "task_executor", "tempfile", "types", @@ -5336,6 +5434,7 @@ dependencies = [ "libp2p", "libp2p-mplex", "lighthouse_version", + "local-ip-address", "logging", "lru", "lru_cache", @@ -5344,7 +5443,7 @@ dependencies = [ "prometheus-client", "quickcheck", "quickcheck_macros", - "rand", + "rand 0.8.5", "regex", "serde", "sha2 0.9.9", @@ -5396,12 +5495,6 @@ dependencies = [ "target_info", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.1.4" @@ -5410,9 +5503,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -5441,6 +5534,18 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "local-ip-address" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" +dependencies = [ + "libc", + "neli", + "thiserror 1.0.69", + "windows-sys 0.59.0", +] + [[package]] name = "lock_api" version = "0.4.12" @@ -5461,9 +5566,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" 
[[package]] name = "logging" @@ -5486,22 +5591,26 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "lru" version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.1", -] - -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", + "hashbrown 0.15.2", ] [[package]] @@ -5717,22 +5826,21 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -5742,6 +5850,49 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9366861eb2a2c436c20b12c8dbec5f798cea6b47ad99216be0282942e2c81ea0" +[[package]] +name = "mockito" +version = "1.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "652cd6d169a36eaf9d1e6bce1a221130439a966d7f27858af66a33a66e9c4ee2" +dependencies = [ + "assert-json-diff", + "bytes", + "colored", + "futures-util", + "http 1.2.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-util", + "log", + "rand 0.8.5", + "regex", + "serde_json", + "serde_urlencoded", + "similar", + "tokio", +] + +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot 0.12.3", + "portable-atomic", + "rustc_version 0.4.1", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid 1.12.1", +] + [[package]] name = "monitoring_api" version = "0.1.0" @@ -5799,9 +5950,9 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.2" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" dependencies = [ "core2", "unsigned-varint 0.8.0", @@ -5823,9 +5974,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c" dependencies = [ "libc", "log", @@ -5838,6 +5989,31 @@ dependencies = [ "tempfile", ] +[[package]] +name = "neli" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +dependencies = [ + "byteorder", + "libc", + "log", + "neli-proc-macros", +] + +[[package]] +name = 
"neli-proc-macros" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +dependencies = [ + "either", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", +] + [[package]] name = "netlink-packet-core" version = "0.7.0" @@ -5877,24 +6053,23 @@ dependencies = [ [[package]] name = "netlink-proto" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" +checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror 1.0.69", - "tokio", + "thiserror 2.0.11", ] [[package]] name = "netlink-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" +checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" dependencies = [ "bytes", "futures", @@ -5925,7 +6100,7 @@ dependencies = [ "genesis", "gossipsub", "hex", - "igd-next", + "igd-next 0.16.0", "itertools 0.10.5", "kzg", "lighthouse_network", @@ -5935,7 +6110,7 @@ dependencies = [ "metrics", "operation_pool", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "serde_json", "slog", "slog-async", @@ -5980,7 +6155,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if", "cfg_aliases", "libc", @@ -6060,7 +6235,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand", + "rand 0.8.5", "serde", "smallvec", "zeroize", @@ -6114,9 +6289,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -6182,11 +6357,11 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.68" +version = "0.10.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "cfg-if", "foreign-types", "libc", @@ -6203,14 +6378,14 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" @@ -6223,9 +6398,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.104" +version = "0.9.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +checksum = "8b22d5b84be05a8d6947c7cb71f7c849aa0f112acd4bf51c2a7c1c988ac0a9dc" dependencies = [ "cc", "libc", @@ -6247,7 +6422,7 @@ dependencies = [ "maplit", "metrics", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "rayon", "serde", "state_processing", @@ -6299,16 +6474,15 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" +checksum = 
"306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec", "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.7.0", - "rustversion", + "parity-scale-codec-derive 3.6.12", "serde", ] @@ -6326,14 +6500,14 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 1.0.109", ] [[package]] @@ -6385,7 +6559,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall 0.5.8", "smallvec", "windows-targets 0.52.6", ] @@ -6397,7 +6571,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -6455,12 +6629,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 1.0.69", + "thiserror 2.0.11", "ucd-trie", ] @@ -6476,47 +6650,47 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" 
dependencies = [ "phf_shared", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -6594,7 +6768,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.41", + "rustix 0.38.44", "tracing", "windows-sys 0.59.0", ] @@ -6623,10 +6797,16 @@ dependencies = [ ] [[package]] -name = "postgres-protocol" -version = "0.6.7" +name = "portable-atomic" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acda0ebdebc28befa84bee35e651e4c5f09073d668c7aed4cf7e23c3cda84b23" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" + +[[package]] +name = "postgres-protocol" 
+version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ff0abab4a9b844b93ef7b81f1efc0a366062aaef2cd702c76256b5dc075c54" dependencies = [ "base64 0.22.1", "byteorder", @@ -6635,16 +6815,16 @@ dependencies = [ "hmac 0.12.1", "md-5", "memchr", - "rand", + "rand 0.9.0", "sha2 0.10.8", "stringprep", ] [[package]] name = "postgres-types" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" +checksum = "613283563cd90e1dfc3518d548caee47e0e725455ed619881f5cf21f36de4b48" dependencies = [ "bytes", "fallible-iterator", @@ -6663,15 +6843,16 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] name = "pq-sys" -version = "0.6.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6cc05d7ea95200187117196eee9edd0644424911821aeb28a18ce60ea0b8793" +checksum = "30b51d65ebe1cb1f40641b15abae017fed35ccdda46e3dab1ff8768f625a3222" dependencies = [ + "libc", "vcpkg", ] @@ -6685,12 +6866,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.25" +version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -6745,14 +6926,14 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.22", + "toml_edit 0.22.23", ] [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.93" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -6807,7 +6988,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -6818,11 +6999,11 @@ checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.6.0", + "bitflags 2.8.0", "lazy_static", "num-traits", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_xorshift", "regex-syntax 0.8.5", "rusty-fork", @@ -6832,13 +7013,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -6914,7 +7095,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "env_logger 0.8.4", "log", - "rand", + "rand 0.8.5", ] [[package]] @@ -6939,10 +7120,10 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustc-hash 2.1.0", + "rustls 0.23.22", "socket2", - "thiserror 2.0.3", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -6954,14 +7135,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", - "getrandom", - "rand", + "getrandom 0.2.15", + "rand 0.8.5", "ring 0.17.8", - "rustc-hash 2.0.0", - "rustls 0.23.18", + "rustc-hash 2.1.0", + "rustls 0.23.22", 
"rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.11", "tinyvec", "tracing", "web-time", @@ -6969,9 +7150,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.7" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ "cfg_aliases", "libc", @@ -6983,9 +7164,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -7030,11 +7211,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", "serde", ] +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.0", + "zerocopy 0.8.14", +] + [[package]] name = "rand_chacha" version = "0.3.1" @@ -7042,7 +7234,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.0", ] [[package]] @@ -7051,7 +7253,17 @@ version = "0.6.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +dependencies = [ + "getrandom 0.3.1", + "zerocopy 0.8.14", ] [[package]] @@ -7060,7 +7272,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -7097,9 +7309,9 @@ dependencies = [ [[package]] name = "redb" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b1de48a7cf7ba193e81e078d17ee2b786236eed1d3f7c60f8a09545efc4925" +checksum = "ea0a72cd7140de9fc3e318823b883abf819c20d478ec89ce880466dc2ef263c6" dependencies = [ "libc", ] @@ -7115,11 +7327,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", ] [[package]] @@ -7128,7 +7340,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom", + "getrandom 0.2.15", "libredox", "thiserror 1.0.69", ] @@ -7188,10 +7400,10 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.31", + "hyper 0.14.32", "hyper-rustls", "hyper-tls", "ipnet", @@ -7293,7 +7505,7 @@ 
checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -7366,22 +7578,24 @@ dependencies = [ [[package]] name = "ruint" -version = "1.12.3" +version = "1.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +checksum = "f5ef8fb1dd8de3870cb8400d51b4c2023854bbafd5431a3ac7e7317243e22d2f" dependencies = [ "alloy-rlp", "arbitrary", "ark-ff 0.3.0", "ark-ff 0.4.2", "bytes", - "fastrlp", + "fastrlp 0.3.1", + "fastrlp 0.4.0", "num-bigint", + "num-integer", "num-traits", - "parity-scale-codec 3.7.0", + "parity-scale-codec 3.6.12", "primitive-types 0.12.2", "proptest", - "rand", + "rand 0.8.5", "rlp", "ruint-macro", "serde", @@ -7411,9 +7625,9 @@ dependencies = [ [[package]] name = "rust_eth_kzg" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3291fd0d9c629a56537d74bbc1e7bcaf5be610f2f7b55af85c4fea843c6aeca3" +checksum = "a237a478ee68e491a0f40bbcbb958b79ba9b37aacce459f7ab3ba78f3cbfa9d0" dependencies = [ "crate_crypto_internal_eth_kzg_bls12_381", "crate_crypto_internal_eth_kzg_erasure_codes", @@ -7437,9 +7651,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc-hex" @@ -7462,7 +7676,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.23", + "semver 1.0.25", ] [[package]] @@ -7490,15 +7704,15 @@ 
dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "errno", "libc", - "linux-raw-sys 0.4.14", - "windows-sys 0.52.0", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", ] [[package]] @@ -7529,9 +7743,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.18" +version = "0.23.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" +checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" dependencies = [ "once_cell", "ring 0.17.8", @@ -7561,9 +7775,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" dependencies = [ "web-time", ] @@ -7591,9 +7805,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rusty-fork" @@ -7620,9 +7834,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = "safe_arith" @@ -7654,7 +7868,7 @@ 
checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "cfg-if", "derive_more 1.0.0", - "parity-scale-codec 3.7.0", + "parity-scale-codec 3.6.12", "scale-info-derive", ] @@ -7667,7 +7881,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -7756,7 +7970,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "core-foundation", "core-foundation-sys", "libc", @@ -7765,9 +7979,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -7784,9 +7998,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" dependencies = [ "serde", ] @@ -7822,9 +8036,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.215" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -7841,20 +8055,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ "itoa", "memchr", @@ -7880,7 +8094,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -7932,7 +8146,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -8037,7 +8251,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -8047,7 +8261,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -8067,14 +8281,20 @@ dependencies = [ ] [[package]] -name = "simple_asn1" -version = "0.6.2" +name = "similar" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" + +[[package]] +name = "simple_asn1" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 1.0.69", + "thiserror 2.0.11", "time", ] @@ -8099,9 +8319,9 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" @@ -8131,7 +8351,7 @@ dependencies = [ "maplit", "metrics", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "rayon", "redb", "safe_arith", @@ -8315,7 +8535,7 @@ dependencies = [ "blake2", "chacha20poly1305", "curve25519-dalek", - "rand_core", + "rand_core 0.6.4", "ring 0.17.8", "rustc_version 0.4.1", "sha2 0.10.8", @@ -8324,9 +8544,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -8405,7 +8625,7 @@ dependencies = [ "itertools 0.10.5", "merkle_proof", "metrics", - "rand", + "rand 0.8.5", "rayon", "safe_arith", "smallvec", @@ -8450,7 +8670,8 @@ dependencies = [ "lru", "metrics", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", + "redb", "safe_arith", "serde", "slog", @@ -8553,9 +8774,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -8582,7 +8803,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -8617,7 +8838,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.8.0", "core-foundation", "system-configuration-sys 0.6.0", ] @@ -8653,6 +8874,12 @@ dependencies = [ "types", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "take_mut" version = "0.2.2" @@ -8694,14 +8921,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.14.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" dependencies = [ "cfg-if", "fastrand", + "getrandom 0.3.1", "once_cell", - "rustix 0.38.41", + "rustix 0.38.44", "windows-sys 0.59.0", ] @@ -8727,11 +8955,11 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9" dependencies = [ - "rustix 0.38.41", + "rustix 0.38.44", "windows-sys 0.59.0", ] @@ -8762,7 +8990,7 @@ dependencies = [ "hex", "hmac 0.12.1", "log", - "rand", + "rand 0.8.5", "serde", "serde_json", "sha2 0.10.8", @@ -8779,11 +9007,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = 
"d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.11", ] [[package]] @@ -8794,18 +9022,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -8860,9 +9088,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -8881,9 +9109,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -8910,7 +9138,7 @@ dependencies = [ "hmac 0.12.1", "once_cell", "pbkdf2 0.11.0", - "rand", + "rand 0.8.5", "rustc-hash 1.1.0", "sha2 0.10.8", "thiserror 1.0.69", @@ -8950,9 +9178,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = 
"022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -8965,14 +9193,15 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", "libc", "mio", + "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", "socket2", @@ -8992,13 +9221,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -9013,9 +9242,9 @@ dependencies = [ [[package]] name = "tokio-postgres" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" +checksum = "6c95d533c83082bb6490e0189acaa0bbeef9084e60471b696ca6988cd0541fb0" dependencies = [ "async-trait", "byteorder", @@ -9030,7 +9259,7 @@ dependencies = [ "pin-project-lite", "postgres-protocol", "postgres-types", - "rand", + "rand 0.9.0", "socket2", "tokio", "tokio-util", @@ -9060,9 +9289,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -9072,9 
+9301,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -9103,7 +9332,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.22", + "toml_edit 0.22.23", ] [[package]] @@ -9121,34 +9350,34 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.1", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.1", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.20", + "winnow 0.7.0", ] [[package]] name = "tower" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.2", "tokio", "tower-layer", "tower-service", @@ -9169,9 +9398,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = 
"784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -9193,20 +9422,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -9235,9 +9464,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -9290,7 +9519,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -9355,7 +9584,7 @@ dependencies = [ "milhouse", "parking_lot 0.12.3", "paste", - "rand", + "rand 0.8.5", "rand_xorshift", "rayon", "regex", @@ -9422,21 +9651,21 @@ checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" [[package]] name = "unicase" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-bidi" -version = "0.3.17" +version = "0.3.18" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-normalization" @@ -9519,7 +9748,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 1.0.3", + "idna", "percent-encoding", ] @@ -9547,10 +9776,19 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom", + "getrandom 0.2.15", "serde", ] +[[package]] +name = "uuid" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" +dependencies = [ + "getrandom 0.2.15", +] + [[package]] name = "validator_client" version = "0.3.5" @@ -9566,7 +9804,7 @@ dependencies = [ "eth2", "fdlimit", "graffiti_file", - "hyper 1.5.1", + "hyper 1.6.0", "initialized_validators", "lighthouse_validator_store", "metrics", @@ -9598,7 +9836,7 @@ dependencies = [ "filesystem", "hex", "lockfile", - "rand", + "rand 0.8.5", "tempfile", "tree_hash", "types", @@ -9628,7 +9866,7 @@ dependencies = [ "lighthouse_version", "logging", "parking_lot 0.12.3", - "rand", + "rand 0.8.5", "sensitive_url", "serde", "signing_method", @@ -9710,6 +9948,7 @@ version = "0.1.0" dependencies = [ "beacon_node_fallback", "bls", + "either", "eth2", "futures", "graffiti_file", @@ -9735,10 +9974,24 @@ dependencies = [ 
] [[package]] -name = "valuable" +name = "validator_test_rig" version = "0.1.0" +dependencies = [ + "eth2", + "logging", + "mockito", + "regex", + "sensitive_url", + "serde_json", + "slog", + "types", +] + +[[package]] +name = "valuable" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vcpkg" @@ -9803,7 +10056,7 @@ dependencies = [ "futures-util", "headers", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "mime", "mime_guess", @@ -9844,6 +10097,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasite" version = "0.1.0" @@ -9852,47 +10114,48 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", - "once_cell", 
"proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9900,22 +10163,25 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "wasm-streams" @@ -9960,12 +10226,12 @@ dependencies = [ "env_logger 0.9.3", "eth2", "http_api", - "hyper 1.5.1", + "hyper 1.6.0", "log", "logging", "network", "r2d2", - "rand", + "rand 0.8.5", "reqwest", "serde", "serde_json", @@ -9981,9 +10247,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.72" +version = 
"0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -10043,7 +10309,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.41", + "rustix 0.38.44", ] [[package]] @@ -10052,7 +10318,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.5.7", + "redox_syscall 0.5.8", "wasite", "web-sys", ] @@ -10110,6 +10376,16 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-acl" version = "0.3.0" @@ -10137,10 +10413,45 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dcc5b895a6377f1ab9fa55acedab1fd5ac0db66ad1e6c7f47e28a22e446a5dd" dependencies = [ - "windows-result", + "windows-result 0.1.2", "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result 0.2.0", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = 
"windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "windows-result" version = "0.1.2" @@ -10150,6 +10461,25 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -10375,9 +10705,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.20" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "7e49d2d35d3fad69b39b94139037ecfb4f359f08958b9c11e7315ce770462419" dependencies = [ "memchr", ] @@ -10392,6 +10722,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.8.0", +] + [[package]] name = "write16" version = "1.0.0" @@ -10445,7 +10784,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ "curve25519-dalek", - "rand_core", + "rand_core 0.6.4", "serde", "zeroize", ] @@ -10478,14 +10817,14 @@ dependencies = [ 
"futures-util", "libc", "log", - "rand", + "rand 0.8.5", ] [[package]] name = "xml-rs" -version = "0.8.23" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" +checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" [[package]] name = "xmltree" @@ -10518,7 +10857,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.3", "pin-project", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -10533,7 +10872,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.3", "pin-project", - "rand", + "rand 0.8.5", "static_assertions", "web-time", ] @@ -10567,7 +10906,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", "synstructure", ] @@ -10578,7 +10917,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a367f292d93d4eab890745e75a778da40909cab4d6ff8173693812f79c4a2468" +dependencies = [ + "zerocopy-derive 0.8.14", ] [[package]] @@ -10589,7 +10937,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3931cb58c62c13adec22e38686b559c86a30565e16ad6e8510a337cedc611e1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", ] [[package]] @@ -10609,7 +10968,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.89", + "syn 2.0.98", "synstructure", ] @@ -10631,7 +10990,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] @@ -10653,7 +11012,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.98", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f316adbded..db365738be 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,8 +86,10 @@ members = [ "testing/simulator", "testing/state_transition_vectors", "testing/test-test_logger", + "testing/validator_test_rig", "testing/web3signer_tests", + "validator_client", "validator_client/beacon_node_fallback", "validator_client/doppelganger_service", @@ -132,14 +134,7 @@ delay_map = "0.4" derivative = "2" dirs = "3" either = "1.9" -# TODO: rust_eth_kzg is pinned for now while a perf regression is investigated -# The crate_crypto_* dependencies can be removed from this file completely once we update -rust_eth_kzg = "=0.5.1" -crate_crypto_internal_eth_kzg_bls12_381 = "=0.5.1" -crate_crypto_internal_eth_kzg_erasure_codes = "=0.5.1" -crate_crypto_internal_eth_kzg_maybe_rayon = "=0.5.1" -crate_crypto_internal_eth_kzg_polynomial = "=0.5.1" -crate_crypto_kzg_multi_open_fk20 = "=0.5.1" +rust_eth_kzg = "0.5.3" discv5 = { version = "0.9", features = ["libp2p"] } env_logger = "0.9" ethereum_hashing = "0.7.0" @@ -162,6 +157,7 @@ log = "0.4" lru = "0.12" maplit = "1" milhouse = "0.3" +mockito = "1.5.0" num_cpus = "1" parking_lot = "0.12" paste = "1" @@ -269,6 +265,7 @@ malloc_utils = { path = "common/malloc_utils" } merkle_proof = { path = "consensus/merkle_proof" } monitoring_api = { path = "common/monitoring_api" } network = { path = "beacon_node/network" } +node_test_rig = { path = "testing/node_test_rig" } operation_pool = { path = "beacon_node/operation_pool" } pretty_reqwest_error = { path = "common/pretty_reqwest_error" } 
proto_array = { path = "consensus/proto_array" } @@ -291,6 +288,7 @@ validator_http_api = { path = "validator_client/http_api" } validator_http_metrics = { path = "validator_client/http_metrics" } validator_metrics = { path = "validator_client/validator_metrics" } validator_store = { path = "validator_client/validator_store" } +validator_test_rig = { path = "testing/validator_test_rig" } warp_utils = { path = "common/warp_utils" } xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "50d63cdf1878e5cf3538e9aae5eed34a22c64e4a" } zstd = "0.13" diff --git a/Cross.toml b/Cross.toml index 871391253d..8181967f32 100644 --- a/Cross.toml +++ b/Cross.toml @@ -3,3 +3,14 @@ pre-build = ["apt-get install -y cmake clang-5.0"] [target.aarch64-unknown-linux-gnu] pre-build = ["apt-get install -y cmake clang-5.0"] + +# Allow setting page size limits for jemalloc at build time: +# For certain architectures (like aarch64), we must compile +# jemalloc with support for large page sizes, otherwise the host's +# system page size will be used, which may not work on the target systems. +# JEMALLOC_SYS_WITH_LG_PAGE=16 tells jemalloc to support up to 64-KiB +# pages. See: https://github.com/sigp/lighthouse/issues/5244 +[build.env] +passthrough = [ + "JEMALLOC_SYS_WITH_LG_PAGE", +] diff --git a/Dockerfile b/Dockerfile index 0f334e2ac8..437c864c30 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.80.0-bullseye AS builder +FROM rust:1.84.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES diff --git a/Makefile b/Makefile index 4d95f50c5c..f621f38a63 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly # List of features to use when cross-compiling. Can be overridden via the environment. 
-CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc +CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc,beacon-node-leveldb,beacon-node-redb # Cargo profile for Cross builds. Default is for local builds, CI uses an override. CROSS_PROFILE ?= release @@ -63,12 +63,18 @@ install-lcli: build-x86_64: cross build --bin lighthouse --target x86_64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-aarch64: - cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked + # JEMALLOC_SYS_WITH_LG_PAGE=16 tells jemalloc to support up to 64-KiB + # pages, which are commonly used by aarch64 systems. + # See: https://github.com/sigp/lighthouse/issues/5244 + JEMALLOC_SYS_WITH_LG_PAGE=16 cross build --bin lighthouse --target aarch64-unknown-linux-gnu --features "portable,$(CROSS_FEATURES)" --profile "$(CROSS_PROFILE)" --locked build-lcli-x86_64: cross build --bin lcli --target x86_64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked build-lcli-aarch64: - cross build --bin lcli --target aarch64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked + # JEMALLOC_SYS_WITH_LG_PAGE=16 tells jemalloc to support up to 64-KiB + # pages, which are commonly used by aarch64 systems. + # See: https://github.com/sigp/lighthouse/issues/5244 + JEMALLOC_SYS_WITH_LG_PAGE=16 cross build --bin lcli --target aarch64-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked # Create a `.tar.gz` containing a binary for a specific target. 
define tarball_release_binary @@ -222,7 +228,7 @@ lint-fix: # Also run the lints on the optimized-only tests lint-full: - RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" $(MAKE) lint + TEST_FEATURES="beacon-node-leveldb,beacon-node-redb,${TEST_FEATURES}" RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" $(MAKE) lint # Runs the makefile in the `ef_tests` repo. # @@ -244,7 +250,7 @@ install-audit: cargo install --force cargo-audit audit-CI: - cargo audit --ignore RUSTSEC-2024-0421 + cargo audit # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index 534939cf6b..44ec638a09 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -2,11 +2,8 @@ mod common; pub mod validator; pub mod wallet; -use clap::Arg; -use clap::ArgAction; use clap::ArgMatches; use clap::Command; -use clap_utils::FLAG_HEADER; use environment::Environment; use types::EthSpec; @@ -21,15 +18,6 @@ pub fn cli_app() -> Command { .visible_aliases(["a", "am", "account"]) .about("Utilities for generating and managing Ethereum 2.0 accounts.") .display_order(0) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER), - ) .subcommand(wallet::cli_app()) .subcommand(validator::cli_app()) } diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index 61584cbfbb..b699301cde 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -8,7 +8,6 @@ pub mod slashing_protection; use crate::{VALIDATOR_DIR_FLAG, VALIDATOR_DIR_FLAG_ALIAS}; use clap::{Arg, ArgAction, ArgMatches, Command}; -use clap_utils::FLAG_HEADER; use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR}; use environment::Environment; use std::path::PathBuf; @@ -20,16 +19,6 @@ pub fn cli_app() -> 
Command { Command::new(CMD) .display_order(0) .about("Provides commands for managing Eth2 validators.") - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - .global(true), - ) .arg( Arg::new(VALIDATOR_DIR_FLAG) .long(VALIDATOR_DIR_FLAG) diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index c34f0363a4..f6f3bb0419 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -4,7 +4,6 @@ pub mod recover; use crate::WALLETS_DIR_FLAG; use clap::{Arg, ArgAction, ArgMatches, Command}; -use clap_utils::FLAG_HEADER; use directory::{parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; use std::fs::create_dir_all; use std::path::PathBuf; @@ -15,16 +14,6 @@ pub fn cli_app() -> Command { Command::new(CMD) .about("Manage wallets, from which validator keys can be derived.") .display_order(0) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - .global(true) - ) .arg( Arg::new(WALLETS_DIR_FLAG) .long(WALLETS_DIR_FLAG) diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 3b37b09e40..4f7c480c8c 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -175,7 +175,9 @@ impl BeaconChain { let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; - for effective_balance_eth in 1..=self.max_effective_balance_increment_steps()? { + for effective_balance_eth in + 1..=self.max_effective_balance_increment_steps(previous_epoch)? 
+ { let effective_balance = effective_balance_eth.safe_mul(spec.effective_balance_increment)?; let base_reward = @@ -321,11 +323,14 @@ impl BeaconChain { }) } - fn max_effective_balance_increment_steps(&self) -> Result { + fn max_effective_balance_increment_steps( + &self, + rewards_epoch: Epoch, + ) -> Result { let spec = &self.spec; - let max_steps = spec - .max_effective_balance - .safe_div(spec.effective_balance_increment)?; + let fork_name = spec.fork_name_at_epoch(rewards_epoch); + let max_effective_balance = spec.max_effective_balance_for_fork(fork_name); + let max_steps = max_effective_balance.safe_div(spec.effective_balance_increment)?; Ok(max_steps) } @@ -386,7 +391,9 @@ impl BeaconChain { let mut ideal_attestation_rewards_list = Vec::new(); let sqrt_total_active_balance = SqrtTotalActiveBalance::new(total_balances.current_epoch()); - for effective_balance_step in 1..=self.max_effective_balance_increment_steps()? { + for effective_balance_step in + 1..=self.max_effective_balance_increment_steps(previous_epoch)? 
+ { let effective_balance = effective_balance_step.safe_mul(spec.effective_balance_increment)?; let base_reward = diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index ffaf61e41a..a70a2caa4f 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -60,9 +60,9 @@ use std::borrow::Cow; use strum::AsRefStr; use tree_hash::TreeHash; use types::{ - Attestation, AttestationRef, BeaconCommittee, BeaconStateError::NoCommitteeFound, ChainSpec, - CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, SelectionProof, - SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, + Attestation, AttestationData, AttestationRef, BeaconCommittee, + BeaconStateError::NoCommitteeFound, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, + IndexedAttestation, SelectionProof, SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, }; pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; @@ -115,6 +115,17 @@ pub enum Error { /// /// The peer has sent an invalid message. AggregatorNotInCommittee { aggregator_index: u64 }, + /// The `attester_index` for a `SingleAttestation` is not a member of the committee defined + /// by its `beacon_block_root`, `committee_index` and `slot`. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + AttesterNotInCommittee { + attester_index: u64, + committee_index: u64, + slot: Slot, + }, /// The aggregator index refers to a validator index that we have not seen. /// /// ## Peer scoring @@ -327,8 +338,8 @@ impl VerifiedUnaggregatedAttestation<'_, T> { pub fn single_attestation(&self) -> Option { Some(SingleAttestation { - committee_index: self.attestation.committee_index()? 
as usize, - attester_index: self.validator_index, + committee_index: self.attestation.committee_index()?, + attester_index: self.validator_index as u64, data: self.attestation.data().clone(), signature: self.attestation.signature().clone(), }) @@ -485,7 +496,11 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; + verify_propagation_slot_range::<_, T::EthSpec>( + &chain.slot_clock, + attestation.data(), + &chain.spec, + )?; // Check the attestation's epoch matches its target. if attestation.data().slot.epoch(T::EthSpec::slots_per_epoch()) @@ -817,7 +832,11 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance). // // We do not queue future attestations for later processing. - verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?; + verify_propagation_slot_range::<_, T::EthSpec>( + &chain.slot_clock, + attestation.data(), + &chain.spec, + )?; // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one // aggregation bit set. @@ -1133,10 +1152,10 @@ fn verify_head_block_is_known( /// Accounts for `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. 
pub fn verify_propagation_slot_range( slot_clock: &S, - attestation: AttestationRef, + attestation: &AttestationData, spec: &ChainSpec, ) -> Result<(), Error> { - let attestation_slot = attestation.data().slot; + let attestation_slot = attestation.slot; let latest_permissible_slot = slot_clock .now_with_future_tolerance(spec.maximum_gossip_clock_disparity()) .ok_or(BeaconChainError::UnableToReadSlot)?; diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d0c294b44f..ca21b519f1 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -34,6 +34,7 @@ use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, Prep use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker}; +use crate::kzg_utils::reconstruct_blobs; use crate::light_client_finality_update_verification::{ Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate, }; @@ -1249,6 +1250,55 @@ impl BeaconChain { self.store.get_blobs(block_root).map_err(Error::from) } + /// Returns the data columns at the given root, if any. + /// + /// ## Errors + /// May return a database error. + pub fn get_data_columns( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + self.store.get_data_columns(block_root).map_err(Error::from) + } + + /// Returns the blobs at the given root, if any. + /// + /// Uses the `block.epoch()` to determine whether to retrieve blobs or columns from the store. + /// + /// If at least 50% of columns are retrieved, blobs will be reconstructed and returned, + /// otherwise an error `InsufficientColumnsToReconstructBlobs` is returned. + /// + /// ## Errors + /// May return a database error. 
+ pub fn get_or_reconstruct_blobs( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + let Some(block) = self.store.get_blinded_block(block_root)? else { + return Ok(None); + }; + + if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + if let Some(columns) = self.store.get_data_columns(block_root)? { + let num_required_columns = self.spec.number_of_columns / 2; + let reconstruction_possible = columns.len() >= num_required_columns as usize; + if reconstruction_possible { + reconstruct_blobs(&self.kzg, &columns, None, &block, &self.spec) + .map(Some) + .map_err(Error::FailedToReconstructBlobs) + } else { + Err(Error::InsufficientColumnsToReconstructBlobs { + columns_found: columns.len(), + }) + } + } else { + Ok(None) + } + } else { + self.get_blobs(block_root).map(|b| b.blobs()) + } + } + /// Returns the data columns at the given root, if any. /// /// ## Errors @@ -5850,6 +5900,7 @@ impl BeaconChain { let kzg = self.kzg.as_ref(); + // TODO(fulu): we no longer need blob proofs from PeerDAS and could avoid computing. kzg_utils::validate_blobs::( kzg, expected_kzg_commitments, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 315105ac2b..1265276376 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -208,24 +208,18 @@ pub enum BlockError { /// /// The block is invalid and the peer is faulty. IncorrectBlockProposer { block: u64, local_shuffling: u64 }, - /// The proposal signature in invalid. - /// - /// ## Peer scoring - /// - /// The block is invalid and the peer is faulty. - ProposalSignatureInvalid, /// The `block.proposal_index` is not known. /// /// ## Peer scoring /// /// The block is invalid and the peer is faulty. UnknownValidator(u64), - /// A signature in the block is invalid (exactly which is unknown). 
+ /// A signature in the block is invalid /// /// ## Peer scoring /// /// The block is invalid and the peer is faulty. - InvalidSignature, + InvalidSignature(InvalidSignature), /// The provided block is not from a later slot than its parent. /// /// ## Peer scoring @@ -329,6 +323,17 @@ pub enum BlockError { InternalError(String), } +/// Which specific signature(s) are invalid in a SignedBeaconBlock +#[derive(Debug)] +pub enum InvalidSignature { + // The outer signature in a SignedBeaconBlock + ProposerSignature, + // One or more signatures in BeaconBlockBody + BlockBodySignatures, + // One or more signatures in SignedBeaconBlock + Unknown, +} + impl From for BlockError { fn from(e: AvailabilityCheckError) -> Self { Self::AvailabilityCheck(e) @@ -523,7 +528,9 @@ pub enum BlockSlashInfo { impl BlockSlashInfo { pub fn from_early_error_block(header: SignedBeaconBlockHeader, e: BlockError) -> Self { match e { - BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), + BlockError::InvalidSignature(InvalidSignature::ProposerSignature) => { + BlockSlashInfo::SignatureInvalid(e) + } // `InvalidSignature` could indicate any signature in the block, so we want // to recheck the proposer signature alone. 
_ => BlockSlashInfo::SignatureNotChecked(header, e), @@ -652,7 +659,7 @@ pub fn signature_verify_chain_segment( } if signature_verifier.verify().is_err() { - return Err(BlockError::InvalidSignature); + return Err(BlockError::InvalidSignature(InvalidSignature::Unknown)); } drop(pubkey_cache); @@ -964,7 +971,9 @@ impl GossipVerifiedBlock { }; if !signature_is_valid { - return Err(BlockError::ProposalSignatureInvalid); + return Err(BlockError::InvalidSignature( + InvalidSignature::ProposerSignature, + )); } chain @@ -1098,7 +1107,26 @@ impl SignatureVerifiedBlock { parent: Some(parent), }) } else { - Err(BlockError::InvalidSignature) + // Re-verify the proposer signature in isolation to attribute fault + let pubkey = pubkey_cache + .get(block.message().proposer_index() as usize) + .ok_or_else(|| BlockError::UnknownValidator(block.message().proposer_index()))?; + if block.as_block().verify_signature( + Some(block_root), + pubkey, + &state.fork(), + chain.genesis_validators_root, + &chain.spec, + ) { + // Proposer signature is valid, the invalid signature must be in the body + Err(BlockError::InvalidSignature( + InvalidSignature::BlockBodySignatures, + )) + } else { + Err(BlockError::InvalidSignature( + InvalidSignature::ProposerSignature, + )) + } } } @@ -1153,7 +1181,9 @@ impl SignatureVerifiedBlock { consensus_context, }) } else { - Err(BlockError::InvalidSignature) + Err(BlockError::InvalidSignature( + InvalidSignature::BlockBodySignatures, + )) } } @@ -1981,7 +2011,7 @@ impl BlockBlobError for BlockError { } fn proposer_signature_invalid() -> Self { - BlockError::ProposalSignatureInvalid + BlockError::InvalidSignature(InvalidSignature::ProposerSignature) } } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 9d99ff9d8e..8d62478bea 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -9,6 +9,7 @@ use crate::fork_choice_signal::ForkChoiceSignalTx; use 
crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; use crate::head_tracker::HeadTracker; +use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::observed_data_sidecars::ObservedDataSidecars; @@ -562,9 +563,30 @@ where .put_block(&weak_subj_block_root, weak_subj_block.clone()) .map_err(|e| format!("Failed to store weak subjectivity block: {e:?}"))?; if let Some(blobs) = weak_subj_blobs { - store - .put_blobs(&weak_subj_block_root, blobs) - .map_err(|e| format!("Failed to store weak subjectivity blobs: {e:?}"))?; + if self + .spec + .is_peer_das_enabled_for_epoch(weak_subj_block.epoch()) + { + // After PeerDAS recompute columns from blobs to not force the checkpointz server + // into exposing another route. + let blobs = blobs + .iter() + .map(|blob_sidecar| &blob_sidecar.blob) + .collect::>(); + let data_columns = + blobs_to_data_column_sidecars(&blobs, &weak_subj_block, &self.kzg, &self.spec) + .map_err(|e| { + format!("Failed to compute weak subjectivity data_columns: {e:?}") + })?; + // TODO(das): only persist the columns under custody + store + .put_data_columns(&weak_subj_block_root, data_columns) + .map_err(|e| format!("Failed to store weak subjectivity data_column: {e:?}"))?; + } else { + store + .put_blobs(&weak_subj_block_root, blobs) + .map_err(|e| format!("Failed to store weak subjectivity blobs: {e:?}"))?; + } } // Stage the database's metadata fields for atomic storage when `build` is called. 
diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index b8a607c886..fcdd57abbc 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -124,7 +124,7 @@ impl Default for ChainConfig { genesis_backfill: false, always_prepare_payload: false, epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION, - enable_light_client_server: false, + enable_light_client_server: true, malicious_withhold_count: 0, enable_sampling: false, blob_publication_batches: 4, diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index aa4689121c..f10d59ca1a 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -27,8 +27,8 @@ mod overflow_lru_cache; mod state_lru_cache; use crate::data_column_verification::{ - verify_kzg_for_data_column, verify_kzg_for_data_column_list, CustodyDataColumn, - GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, + verify_kzg_for_data_column_list_with_scoring, CustodyDataColumn, GossipVerifiedDataColumn, + KzgVerifiedCustodyDataColumn, KzgVerifiedDataColumn, }; use crate::metrics::{ KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS, KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES, @@ -230,19 +230,14 @@ impl DataAvailabilityChecker { block_root: Hash256, custody_columns: DataColumnSidecarList, ) -> Result, AvailabilityCheckError> { - // TODO(das): report which column is invalid for proper peer scoring - // TODO(das): batch KZG verification here, but fallback into checking each column - // individually to report which column(s) are invalid. 
- let verified_custody_columns = custody_columns + // Attributes fault to the specific peer that sent an invalid column + let kzg_verified_columns = KzgVerifiedDataColumn::from_batch(custody_columns, &self.kzg) + .map_err(AvailabilityCheckError::InvalidColumn)?; + + let verified_custody_columns = kzg_verified_columns .into_iter() - .map(|column| { - let index = column.index; - Ok(KzgVerifiedCustodyDataColumn::from_asserted_custody( - KzgVerifiedDataColumn::new(column, &self.kzg) - .map_err(|e| AvailabilityCheckError::InvalidColumn(index, e))?, - )) - }) - .collect::, AvailabilityCheckError>>()?; + .map(KzgVerifiedCustodyDataColumn::from_asserted_custody) + .collect::>(); self.availability_cache.put_kzg_verified_data_columns( block_root, @@ -365,7 +360,8 @@ impl DataAvailabilityChecker { .iter() .map(|custody_column| custody_column.as_data_column()), &self.kzg, - )?; + ) + .map_err(AvailabilityCheckError::InvalidColumn)?; Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, @@ -432,8 +428,9 @@ impl DataAvailabilityChecker { // verify kzg for all data columns at once if !all_data_columns.is_empty() { - // TODO: Need to also attribute which specific block is faulty - verify_kzg_for_data_column_list_with_scoring(all_data_columns.iter(), &self.kzg)?; + // Attributes fault to the specific peer that sent an invalid column + verify_kzg_for_data_column_list_with_scoring(all_data_columns.iter(), &self.kzg) + .map_err(AvailabilityCheckError::InvalidColumn)?; } for block in blocks { @@ -716,32 +713,6 @@ async fn availability_cache_maintenance_service( } } -fn verify_kzg_for_data_column_list_with_scoring<'a, E: EthSpec, I>( - data_column_iter: I, - kzg: &'a Kzg, -) -> Result<(), AvailabilityCheckError> -where - I: Iterator>> + Clone, -{ - let Err(batch_err) = verify_kzg_for_data_column_list(data_column_iter.clone(), kzg) else { - return Ok(()); - }; - - let data_columns = data_column_iter.collect::>(); - // Find which column is invalid. 
If len is 1 or 0 continue to default case below. - // If len > 1 at least one column MUST fail. - if data_columns.len() > 1 { - for data_column in data_columns { - if let Err(e) = verify_kzg_for_data_column(data_column.clone(), kzg) { - return Err(AvailabilityCheckError::InvalidColumn(data_column.index, e)); - } - } - } - - // len 0 should never happen - Err(AvailabilityCheckError::InvalidColumn(0, batch_err)) -} - /// A fully available block that is ready to be imported into fork choice. #[derive(Clone, Debug, PartialEq)] pub struct AvailableBlock { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index cfdb3cfe91..1ab85ab105 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -4,7 +4,7 @@ use types::{BeaconStateError, ColumnIndex, Hash256}; #[derive(Debug)] pub enum Error { InvalidBlobs(KzgError), - InvalidColumn(ColumnIndex, KzgError), + InvalidColumn(Vec<(ColumnIndex, KzgError)>), ReconstructColumnsError(KzgError), KzgCommitmentMismatch { blob_commitment: KzgCommitment, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index c8e92f7e9f..cd793c8394 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -317,7 +317,6 @@ impl PendingComponents { None, ) }; - let executed_block = recover(diet_executed_block)?; let AvailabilityPendingExecutedBlock { @@ -732,7 +731,7 @@ mod test { use slog::{info, Logger}; use state_processing::ConsensusContext; use std::collections::VecDeque; - use store::{HotColdDB, ItemStore, LevelDB, StoreConfig}; + use store::{database::interface::BeaconNodeBackend, HotColdDB, ItemStore, StoreConfig}; use 
tempfile::{tempdir, TempDir}; use types::non_zero_usize::new_non_zero_usize; use types::{ExecPayload, MinimalEthSpec}; @@ -744,7 +743,7 @@ mod test { db_path: &TempDir, spec: Arc, log: Logger, - ) -> Arc, LevelDB>> { + ) -> Arc, BeaconNodeBackend>> { let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); let blobs_path = db_path.path().join("blobs_db"); @@ -920,7 +919,11 @@ mod test { ) where E: EthSpec, - T: BeaconChainTypes, ColdStore = LevelDB, EthSpec = E>, + T: BeaconChainTypes< + HotStore = BeaconNodeBackend, + ColdStore = BeaconNodeBackend, + EthSpec = E, + >, { let log = test_logger(); let chain_db_path = tempdir().expect("should get temp dir"); diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 1bd17485ab..1262fcdeb8 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -239,6 +239,18 @@ impl KzgVerifiedDataColumn { pub fn new(data_column: Arc>, kzg: &Kzg) -> Result { verify_kzg_for_data_column(data_column, kzg) } + + pub fn from_batch( + data_columns: Vec>>, + kzg: &Kzg, + ) -> Result, Vec<(ColumnIndex, KzgError)>> { + verify_kzg_for_data_column_list_with_scoring(data_columns.iter(), kzg)?; + Ok(data_columns + .into_iter() + .map(|column| Self { data: column }) + .collect()) + } + pub fn to_data_column(self) -> Arc> { self.data } @@ -378,6 +390,38 @@ where Ok(()) } +/// Complete kzg verification for a list of `DataColumnSidecar`s. +/// +/// If there's at least one invalid column, it re-verifies all columns individually to identify the +/// first column that is invalid. This is necessary to attribute fault to the specific peer that +/// sent bad data. The re-verification cost should not be significant. If a peer sends invalid data it +/// will be quickly banned. 
+pub fn verify_kzg_for_data_column_list_with_scoring<'a, E: EthSpec, I>( + data_column_iter: I, + kzg: &'a Kzg, +) -> Result<(), Vec<(ColumnIndex, KzgError)>> +where + I: Iterator>> + Clone, +{ + if verify_kzg_for_data_column_list(data_column_iter.clone(), kzg).is_ok() { + return Ok(()); + }; + + // Find all columns that are invalid and identify by index. If we hit this condition there + // should be at least one invalid column + let errors = data_column_iter + .filter_map(|data_column| { + if let Err(e) = verify_kzg_for_data_column(data_column.clone(), kzg) { + Some((data_column.index, e)) + } else { + None + } + }) + .collect::>(); + + Err(errors) +} + pub fn validate_data_column_sidecar_for_gossip( data_column: Arc>, subnet: u64, @@ -699,7 +743,7 @@ mod test { #[tokio::test] async fn empty_data_column_sidecars_fails_validation() { - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec.into()) .deterministic_keypairs(64) diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 2a8fd4cd01..2e13ab4090 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -226,6 +226,10 @@ pub enum BeaconChainError { EmptyRpcCustodyColumns, AttestationError(AttestationError), AttestationCommitteeIndexNotSet, + InsufficientColumnsToReconstructBlobs { + columns_found: usize, + }, + FailedToReconstructBlobs(String), } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index 49e46a50fe..6e365f936d 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -91,7 +91,7 @@ pub async fn fetch_and_process_engine_blobs( .await .map_err(FetchEngineBlobError::RequestFailed)?; - if response.is_empty() { + if 
response.is_empty() || response.iter().all(|opt| opt.is_none()) { debug!( log, "No blobs fetched from the EL"; @@ -163,6 +163,20 @@ pub async fn fetch_and_process_engine_blobs( return Ok(None); } + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + // Avoid computing columns if block has already been imported. + debug!( + log, + "Ignoring EL blobs response"; + "info" => "block has already been imported", + ); + return Ok(None); + } + let data_columns_receiver = spawn_compute_and_publish_data_columns_task( &chain, block.clone(), @@ -248,18 +262,21 @@ fn spawn_compute_and_publish_data_columns_task( } }; - if let Err(e) = data_columns_sender.send(all_data_columns.clone()) { - error!(log, "Failed to send computed data columns"; "error" => ?e); + if data_columns_sender.send(all_data_columns.clone()).is_err() { + // Data column receiver have been dropped - block may have already been imported. + // This race condition exists because gossip columns may arrive and trigger block + // import during the computation. Here we just drop the computed columns. + debug!( + log, + "Failed to send computed data columns"; + ); + return; }; - // Check indices from cache before sending the columns, to make sure we don't - // publish components already seen on gossip. - let is_supernode = chain_cloned.data_availability_checker.is_supernode(); - // At the moment non supernodes are not required to publish any columns. // TODO(das): we could experiment with having full nodes publish their custodied // columns here. - if !is_supernode { + if !chain_cloned.data_availability_checker.is_supernode() { return; } diff --git a/beacon_node/beacon_chain/src/fulu_readiness.rs b/beacon_node/beacon_chain/src/fulu_readiness.rs index 71494623f8..872fe58f2b 100644 --- a/beacon_node/beacon_chain/src/fulu_readiness.rs +++ b/beacon_node/beacon_chain/src/fulu_readiness.rs @@ -1,7 +1,7 @@ //! Provides tools for checking if a node is ready for the Fulu upgrade. 
use crate::{BeaconChain, BeaconChainTypes}; -use execution_layer::http::{ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V5}; +use execution_layer::http::{ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V4}; use serde::{Deserialize, Serialize}; use std::fmt; use std::time::Duration; @@ -87,14 +87,15 @@ impl BeaconChain { Ok(capabilities) => { let mut missing_methods = String::from("Required Methods Unsupported:"); let mut all_good = true; - if !capabilities.get_payload_v5 { + // TODO(fulu) switch to v5 when the EL is ready + if !capabilities.get_payload_v4 { missing_methods.push(' '); - missing_methods.push_str(ENGINE_GET_PAYLOAD_V5); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V4); all_good = false; } - if !capabilities.new_payload_v5 { + if !capabilities.new_payload_v4 { missing_methods.push(' '); - missing_methods.push_str(ENGINE_NEW_PAYLOAD_V5); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V4); all_good = false; } diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index ddae54f464..a48f32e7b4 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -10,10 +10,7 @@ use std::borrow::Cow; use std::iter; use std::time::Duration; use store::metadata::DataColumnInfo; -use store::{ - get_key_for_col, AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, - KeyValueStoreOp, -}; +use store::{AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; use strum::IntoStaticStr; use types::{FixedBytesExtended, Hash256, Slot}; @@ -133,10 +130,20 @@ impl BeaconChain { }); } - let blinded_block = block.clone_as_blinded(); - // Store block in the hot database without payload. 
- self.store - .blinded_block_as_kv_store_ops(&block_root, &blinded_block, &mut hot_batch); + if !self.store.get_config().prune_payloads { + // If prune-payloads is set to false, store the block which includes the execution payload + self.store + .block_as_kv_store_ops(&block_root, (*block).clone(), &mut hot_batch)?; + } else { + let blinded_block = block.clone_as_blinded(); + // Store block in the hot database without payload. + self.store.blinded_block_as_kv_store_ops( + &block_root, + &blinded_block, + &mut hot_batch, + ); + } + // Store the blobs too if let Some(blobs) = maybe_blobs { new_oldest_blob_slot = Some(block.slot()); @@ -153,7 +160,8 @@ impl BeaconChain { // Store block roots, including at all skip slots in the freezer DB. for slot in (block.slot().as_u64()..prev_block_slot.as_u64()).rev() { cold_batch.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + DBColumn::BeaconBlockRoots, + slot.to_be_bytes().to_vec(), block_root.as_slice().to_vec(), )); } @@ -169,7 +177,8 @@ impl BeaconChain { let genesis_slot = self.spec.genesis_slot; for slot in genesis_slot.as_u64()..prev_block_slot.as_u64() { cold_batch.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + DBColumn::BeaconBlockRoots, + slot.to_be_bytes().to_vec(), self.genesis_block_root.as_slice().to_vec(), )); } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index dcb3864f78..06cce14144 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -186,7 +186,7 @@ pub fn blobs_to_data_column_sidecars( .map_err(DataColumnSidecarError::BuildSidecarFailed) } -fn build_data_column_sidecars( +pub(crate) fn build_data_column_sidecars( kzg_commitments: KzgCommitments, kzg_commitments_inclusion_proof: FixedVector, signed_block_header: SignedBeaconBlockHeader, diff --git 
a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 4783945eb1..48168aeb02 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -54,6 +54,7 @@ mod pre_finalization_cache; pub mod proposer_prep_service; pub mod schema_change; pub mod shuffling_cache; +pub mod single_attestation; pub mod state_advance_timer; pub mod sync_committee_rewards; pub mod sync_committee_verification; @@ -78,7 +79,7 @@ pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceSto pub use block_verification::{ build_blob_data_column_sidecars, get_block_root, BlockError, ExecutionPayloadError, ExecutionPendingBlock, GossipVerifiedBlock, IntoExecutionPendingBlock, IntoGossipVerifiedBlock, - PayloadVerificationOutcome, PayloadVerificationStatus, + InvalidSignature, PayloadVerificationOutcome, PayloadVerificationStatus, }; pub use block_verification_types::AvailabilityPendingExecutedBlock; pub use block_verification_types::ExecutedBlock; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index ae3add7f03..d1c7a2a5df 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -85,12 +85,6 @@ pub static BLOCK_PROCESSING_COMMITTEE: LazyLock> = LazyLock::n "Time spent building/obtaining committees for block processing.", ) }); -pub static BLOCK_PROCESSING_SIGNATURE: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_block_processing_signature_seconds", - "Time spent doing signature verification for a block.", - ) -}); pub static BLOCK_PROCESSING_CORE: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_block_processing_core_seconds", @@ -108,7 +102,7 @@ pub static BLOCK_PROCESSING_POST_EXEC_PROCESSING: LazyLock> = try_create_histogram_with_buckets( "beacon_block_processing_post_exec_pre_attestable_seconds", "Time between finishing execution processing and the block becoming attestable", - 
linear_buckets(5e-3, 5e-3, 10), + linear_buckets(0.01, 0.01, 15), ) }); pub static BLOCK_PROCESSING_DATA_COLUMNS_WAIT: LazyLock> = LazyLock::new(|| { @@ -591,12 +585,6 @@ pub static FORK_CHOICE_WRITE_LOCK_AQUIRE_TIMES: LazyLock> = La exponential_buckets(1e-3, 4.0, 7), ) }); -pub static FORK_CHOICE_SET_HEAD_LAG_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_fork_choice_set_head_lag_times", - "Time taken between finding the head and setting the canonical head value", - ) -}); pub static BALANCES_CACHE_HITS: LazyLock> = LazyLock::new(|| { try_create_int_counter( "beacon_balances_cache_hits_total", @@ -651,12 +639,6 @@ pub static DEFAULT_ETH1_VOTES: LazyLock> = LazyLock::new(|| { /* * Chain Head */ -pub static UPDATE_HEAD_TIMES: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_update_head_seconds", - "Time taken to update the canonical head", - ) -}); pub static HEAD_STATE_SLOT: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "beacon_head_state_slot", @@ -1547,20 +1529,6 @@ pub static SYNC_CONTRIBUTION_PROCESSING_APPLY_TO_OP_POOL: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - "beacon_sync_contribution_processing_signature_setup_seconds", - "Time spent on setting up for the signature verification of sync contribution processing" - ) - }); -pub static SYNC_CONTRIBUTION_PROCESSING_SIGNATURE_TIMES: LazyLock> = - LazyLock::new(|| { - try_create_histogram( - "beacon_sync_contribution_processing_signature_seconds", - "Time spent on the signature verification of sync contribution processing", - ) - }); /* * General Sync Committee Contribution Processing @@ -1690,13 +1658,6 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = - LazyLock::new(|| { - try_create_int_counter( - "beacon_data_column_sidecar_processing_successes_total", - "Number of data column sidecars verified for gossip", - ) - }); pub static BLOBS_FROM_EL_HIT_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( @@ -1873,15 
+1834,6 @@ pub static BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: LazyLock ) }, ); -/* - * Availability related metrics - */ -pub static BLOCK_AVAILABILITY_DELAY: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "block_availability_delay", - "Duration between start of the slot and the time at which all components of the block are available.", - ) -}); /* * Data Availability cache metrics @@ -1900,13 +1852,6 @@ pub static DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE: LazyLock> = - LazyLock::new(|| { - try_create_int_gauge( - "data_availability_overflow_store_cache_size", - "Number of entries in the data availability overflow store cache.", - ) - }); pub static DATA_AVAILABILITY_RECONSTRUCTION_TIME: LazyLock> = LazyLock::new(|| { try_create_histogram( diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs index fcc8b9884a..f02f5ee6f3 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v21.rs @@ -3,9 +3,7 @@ use crate::validator_pubkey_cache::DatabasePubkey; use slog::{info, Logger}; use ssz::{Decode, Encode}; use std::sync::Arc; -use store::{ - get_key_for_col, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, -}; +use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; use types::{Hash256, PublicKey}; const LOG_EVERY: usize = 200_000; @@ -62,9 +60,9 @@ pub fn downgrade_from_v21( message: format!("{e:?}"), })?; - let db_key = get_key_for_col(DBColumn::PubkeyCache.into(), key.as_slice()); ops.push(KeyValueStoreOp::PutKeyValue( - db_key, + DBColumn::PubkeyCache, + key.as_slice().to_vec(), pubkey_bytes.as_ssz_bytes(), )); diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs index c34512eded..982c3ded46 100644 --- 
a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use store::chunked_iter::ChunkedVectorIter; use store::{ chunked_vector::BlockRootsChunked, - get_key_for_col, metadata::{ SchemaVersion, ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_UNINITIALIZED, STATE_UPPER_LIMIT_NO_RETAIN, }, @@ -21,7 +20,7 @@ fn load_old_schema_frozen_state( ) -> Result>, Error> { let Some(partial_state_bytes) = db .cold_db - .get_bytes(DBColumn::BeaconState.into(), state_root.as_slice())? + .get_bytes(DBColumn::BeaconState, state_root.as_slice())? else { return Ok(None); }; @@ -136,10 +135,7 @@ pub fn delete_old_schema_freezer_data( for column in columns { for res in db.cold_db.iter_column_keys::>(column) { let key = res?; - cold_ops.push(KeyValueStoreOp::DeleteKey(get_key_for_col( - column.as_str(), - &key, - ))); + cold_ops.push(KeyValueStoreOp::DeleteKey(column, key)); } } let delete_ops = cold_ops.len(); @@ -175,7 +171,8 @@ pub fn write_new_schema_block_roots( // Store the genesis block root if it would otherwise not be stored. if oldest_block_slot != 0 { cold_ops.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col(DBColumn::BeaconBlockRoots.into(), &0u64.to_be_bytes()), + DBColumn::BeaconBlockRoots, + 0u64.to_be_bytes().to_vec(), genesis_block_root.as_slice().to_vec(), )); } @@ -192,10 +189,8 @@ pub fn write_new_schema_block_roots( // OK to hold these in memory (10M slots * 43 bytes per KV ~= 430 MB). 
for (i, (slot, block_root)) in block_root_iter.enumerate() { cold_ops.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col( - DBColumn::BeaconBlockRoots.into(), - &(slot as u64).to_be_bytes(), - ), + DBColumn::BeaconBlockRoots, + slot.to_be_bytes().to_vec(), block_root.as_slice().to_vec(), )); diff --git a/beacon_node/beacon_chain/src/single_attestation.rs b/beacon_node/beacon_chain/src/single_attestation.rs new file mode 100644 index 0000000000..fa4f98bb07 --- /dev/null +++ b/beacon_node/beacon_chain/src/single_attestation.rs @@ -0,0 +1,46 @@ +use crate::attestation_verification::Error; +use types::{Attestation, AttestationElectra, BitList, BitVector, EthSpec, SingleAttestation}; + +pub fn single_attestation_to_attestation( + single_attestation: &SingleAttestation, + committee: &[usize], +) -> Result, Error> { + let attester_index = single_attestation.attester_index; + let committee_index = single_attestation.committee_index; + let slot = single_attestation.data.slot; + + let aggregation_bit = committee + .iter() + .enumerate() + .find_map(|(i, &validator_index)| { + if attester_index as usize == validator_index { + return Some(i); + } + None + }) + .ok_or(Error::AttesterNotInCommittee { + attester_index, + committee_index, + slot, + })?; + + let mut committee_bits: BitVector = BitVector::default(); + committee_bits + .set(committee_index as usize, true) + .map_err(|e| Error::Invalid(e.into()))?; + + let mut aggregation_bits = + BitList::with_capacity(committee.len()).map_err(|e| Error::Invalid(e.into()))?; + aggregation_bits + .set(aggregation_bit, true) + .map_err(|e| Error::Invalid(e.into()))?; + + // TODO(electra): consider eventually allowing conversion to non-Electra attestations as well + // to maintain invertability (`Attestation` -> `SingleAttestation` -> `Attestation`). 
+ Ok(Attestation::Electra(AttestationElectra { + aggregation_bits, + committee_bits, + data: single_attestation.data.clone(), + signature: single_attestation.signature.clone(), + })) +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 443cc686eb..8c9e3929f6 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,11 +1,13 @@ +use crate::blob_verification::GossipVerifiedBlob; use crate::block_verification_types::{AsBlock, RpcBlock}; -use crate::kzg_utils::blobs_to_data_column_sidecars; +use crate::data_column_verification::CustodyDataColumn; +use crate::kzg_utils::build_data_column_sidecars; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; -use crate::BeaconBlockResponseWrapper; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, + single_attestation::single_attestation_to_attestation, sync_committee_verification::Error as SyncCommitteeError, validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}, BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, @@ -16,6 +18,7 @@ use crate::{ BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler, StateSkipConfig, }; +use crate::{get_block_root, BeaconBlockResponseWrapper}; use bls::get_withdrawal_credentials; use eth2::types::SignedBlockContentsTuple; use execution_layer::test_utils::generate_genesis_header; @@ -28,7 +31,7 @@ use execution_layer::{ ExecutionLayer, }; use futures::channel::mpsc::Receiver; -pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; +pub use genesis::{InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use kzg::trusted_setup::get_trusted_setup; use kzg::{Kzg, TrustedSetup}; @@ -56,7 +59,8 @@ use std::str::FromStr; use std::sync::atomic::{AtomicUsize, 
Ordering}; use std::sync::{Arc, LazyLock}; use std::time::Duration; -use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore}; +use store::database::interface::BeaconNodeBackend; +use store::{config::StoreConfig, HotColdDB, ItemStore, MemoryStore}; use task_executor::TaskExecutor; use task_executor::{test_utils::TestRuntime, ShutdownReason}; use tree_hash::TreeHash; @@ -73,6 +77,11 @@ pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // Environment variable to read if `ci_logger` feature is enabled. pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR"; +// Pre-computed data column sidecar using a single static blob from: +// `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz` +const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] = + include_bytes!("test_utils/fixtures/test_data_column_sidecars.ssz"); + // Default target aggregators to set during testing, this ensures an aggregator at each slot. // // You should mutate the `ChainSpec` prior to initialising the harness if you would like to use @@ -104,7 +113,7 @@ static KZG_NO_PRECOMP: LazyLock> = LazyLock::new(|| { }); pub fn get_kzg(spec: &ChainSpec) -> Arc { - if spec.eip7594_fork_epoch.is_some() { + if spec.fulu_fork_epoch.is_some() { KZG_PEERDAS.clone() } else if spec.deneb_fork_epoch.is_some() { KZG.clone() @@ -116,7 +125,7 @@ pub fn get_kzg(spec: &ChainSpec) -> Arc { pub type BaseHarnessType = Witness, E, THotStore, TColdStore>; -pub type DiskHarnessType = BaseHarnessType, LevelDB>; +pub type DiskHarnessType = BaseHarnessType, BeaconNodeBackend>; pub type EphemeralHarnessType = BaseHarnessType, MemoryStore>; pub type BoxedMutator = Box< @@ -223,6 +232,8 @@ pub struct Builder { mock_execution_layer: Option>, testing_slot_clock: Option, validator_monitor_config: Option, + genesis_state_builder: Option>, + import_all_data_columns: bool, runtime: TestRuntime, log: Logger, } @@ -243,16 +254,22 @@ impl Builder> { ) .unwrap(), ); + let genesis_state_builder = 
self.genesis_state_builder.take().unwrap_or_else(|| { + // Set alternating withdrawal credentials if no builder is specified. + InteropGenesisBuilder::default().set_alternating_eth1_withdrawal_credentials() + }); + let mutator = move |builder: BeaconChainBuilder<_>| { let header = generate_genesis_header::(builder.get_spec(), false); - let genesis_state = interop_genesis_state_with_eth1::( - &validator_keypairs, - HARNESS_GENESIS_TIME, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - header, - builder.get_spec(), - ) - .expect("should generate interop state"); + let genesis_state = genesis_state_builder + .set_opt_execution_payload_header(header) + .build_genesis_state( + &validator_keypairs, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + builder.get_spec(), + ) + .expect("should generate interop state"); builder .genesis_state(genesis_state) .expect("should build state using recent genesis") @@ -299,22 +316,31 @@ impl Builder> { impl Builder> { /// Disk store, start from genesis. - pub fn fresh_disk_store(mut self, store: Arc, LevelDB>>) -> Self { + pub fn fresh_disk_store( + mut self, + store: Arc, BeaconNodeBackend>>, + ) -> Self { let validator_keypairs = self .validator_keypairs .clone() .expect("cannot build without validator keypairs"); + let genesis_state_builder = self.genesis_state_builder.take().unwrap_or_else(|| { + // Set alternating withdrawal credentials if no builder is specified. 
+ InteropGenesisBuilder::default().set_alternating_eth1_withdrawal_credentials() + }); + let mutator = move |builder: BeaconChainBuilder<_>| { let header = generate_genesis_header::(builder.get_spec(), false); - let genesis_state = interop_genesis_state_with_eth1::( - &validator_keypairs, - HARNESS_GENESIS_TIME, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - header, - builder.get_spec(), - ) - .expect("should generate interop state"); + let genesis_state = genesis_state_builder + .set_opt_execution_payload_header(header) + .build_genesis_state( + &validator_keypairs, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + builder.get_spec(), + ) + .expect("should generate interop state"); builder .genesis_state(genesis_state) .expect("should build state using recent genesis") @@ -324,7 +350,10 @@ impl Builder> { } /// Disk store, resume. - pub fn resumed_disk_store(mut self, store: Arc, LevelDB>>) -> Self { + pub fn resumed_disk_store( + mut self, + store: Arc, BeaconNodeBackend>>, + ) -> Self { let mutator = move |builder: BeaconChainBuilder<_>| { builder .resume_from_db() @@ -359,6 +388,8 @@ where mock_execution_layer: None, testing_slot_clock: None, validator_monitor_config: None, + genesis_state_builder: None, + import_all_data_columns: false, runtime, log, } @@ -451,6 +482,11 @@ where self } + pub fn import_all_data_columns(mut self, import_all_data_columns: bool) -> Self { + self.import_all_data_columns = import_all_data_columns; + self + } + pub fn execution_layer_from_url(mut self, url: &str) -> Self { assert!( self.execution_layer.is_none(), @@ -538,6 +574,15 @@ where self } + pub fn with_genesis_state_builder( + mut self, + f: impl FnOnce(InteropGenesisBuilder) -> InteropGenesisBuilder, + ) -> Self { + let builder = self.genesis_state_builder.take().unwrap_or_default(); + self.genesis_state_builder = Some(f(builder)); + self + } + pub fn build(self) -> BeaconChainHarness> { let (shutdown_tx, shutdown_receiver) = 
futures::channel::mpsc::channel(1); @@ -568,6 +613,7 @@ where .expect("should build dummy backend") .shutdown_sender(shutdown_tx) .chain_config(chain_config) + .import_all_data_columns(self.import_all_data_columns) .event_handler(Some(ServerSentEventHandler::new_with_capacity( log.clone(), 5, @@ -755,15 +801,13 @@ where pub fn get_head_block(&self) -> RpcBlock { let block = self.chain.head_beacon_block(); let block_root = block.canonical_root(); - let blobs = self.chain.get_blobs(&block_root).unwrap().blobs(); - RpcBlock::new(Some(block_root), block, blobs).unwrap() + self.build_rpc_block_from_store_blobs(Some(block_root), block) } pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { let block = self.chain.get_blinded_block(block_root).unwrap().unwrap(); let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); - let blobs = self.chain.get_blobs(block_root).unwrap().blobs(); - RpcBlock::new(Some(*block_root), Arc::new(full_block), blobs).unwrap() + self.build_rpc_block_from_store_blobs(Some(*block_root), Arc::new(full_block)) } pub fn get_all_validators(&self) -> Vec { @@ -1111,15 +1155,16 @@ where .unwrap(); let single_attestation = - attestation.to_single_attestation_with_attester_index(attester_index)?; + attestation.to_single_attestation_with_attester_index(attester_index as u64)?; - let attestation: Attestation = single_attestation.to_attestation(committee.committee)?; + let attestation: Attestation = + single_attestation_to_attestation(&single_attestation, committee.committee).unwrap(); assert_eq!( single_attestation.committee_index, - attestation.committee_index().unwrap() as usize + attestation.committee_index().unwrap() ); - assert_eq!(single_attestation.attester_index, validator_index); + assert_eq!(single_attestation.attester_index, validator_index as u64); Ok(single_attestation) } @@ -2264,22 +2309,19 @@ where self.set_current_slot(slot); let (block, blob_items) = block_contents; - let sidecars = blob_items - 
.map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) - .transpose() - .unwrap(); + let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?; let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - RpcBlock::new(Some(block_root), block, sidecars).unwrap(), + rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), ) .await? .try_into() - .unwrap(); + .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -2290,16 +2332,13 @@ where ) -> Result { let (block, blob_items) = block_contents; - let sidecars = blob_items - .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) - .transpose() - .unwrap(); let block_root = block.canonical_root(); + let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?; let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - RpcBlock::new(Some(block_root), block, sidecars).unwrap(), + rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), @@ -2311,6 +2350,75 @@ where Ok(block_hash) } + /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from + /// the database. 
+ pub fn build_rpc_block_from_store_blobs( + &self, + block_root: Option, + block: Arc>, + ) -> RpcBlock { + let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); + let has_blobs = block + .message() + .body() + .blob_kzg_commitments() + .is_ok_and(|c| !c.is_empty()); + if !has_blobs { + return RpcBlock::new_without_blobs(Some(block_root), block); + } + + // Blobs are stored as data columns from Fulu (PeerDAS) + if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + let columns = self.chain.get_data_columns(&block_root).unwrap().unwrap(); + let custody_columns = columns + .into_iter() + .map(CustodyDataColumn::from_asserted_custody) + .collect::>(); + RpcBlock::new_with_custody_columns(Some(block_root), block, custody_columns, &self.spec) + .unwrap() + } else { + let blobs = self.chain.get_blobs(&block_root).unwrap().blobs(); + RpcBlock::new(Some(block_root), block, blobs).unwrap() + } + } + + /// Builds an `RpcBlock` from a `SignedBeaconBlock` and `BlobsList`. + fn build_rpc_block_from_blobs( + &self, + block_root: Hash256, + block: Arc>>, + blob_items: Option<(KzgProofs, BlobsList)>, + ) -> Result, BlockError> { + Ok(if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + let sampling_column_count = self + .chain + .data_availability_checker + .get_sampling_column_count(); + + if blob_items.is_some_and(|(_, blobs)| !blobs.is_empty()) { + // Note: this method ignores the actual custody columns and just take the first + // `sampling_column_count` for testing purpose only, because the chain does not + // currently have any knowledge of the columns being custodied. + let columns = generate_data_column_sidecars_from_block(&block, &self.spec) + .into_iter() + .take(sampling_column_count) + .map(CustodyDataColumn::from_asserted_custody) + .collect::>(); + RpcBlock::new_with_custody_columns(Some(block_root), block, columns, &self.spec)? 
+ } else { + RpcBlock::new_without_blobs(Some(block_root), block) + } + } else { + let blobs = blob_items + .map(|(proofs, blobs)| { + BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec) + }) + .transpose() + .unwrap(); + RpcBlock::new(Some(block_root), block, blobs)? + }) + } + pub fn process_attestations(&self, attestations: HarnessAttestations) { let num_validators = self.validator_keypairs.len(); let mut unaggregated = Vec::with_capacity(num_validators); @@ -2984,6 +3092,56 @@ where Ok(()) } + + /// Simulate some of the blobs / data columns being seen on gossip. + /// Converts the blobs to data columns if the slot is Fulu or later. + pub async fn process_gossip_blobs_or_columns<'a>( + &self, + block: &SignedBeaconBlock, + blobs: impl Iterator>, + proofs: impl Iterator, + custody_columns_opt: Option>, + ) { + let is_peerdas_enabled = self.chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); + if is_peerdas_enabled { + let custody_columns = custody_columns_opt.unwrap_or_else(|| { + let sampling_column_count = self + .chain + .data_availability_checker + .get_sampling_column_count() as u64; + (0..sampling_column_count).collect() + }); + + let verified_columns = generate_data_column_sidecars_from_block(block, &self.spec) + .into_iter() + .filter(|c| custody_columns.contains(&c.index)) + .map(|sidecar| { + let column_index = sidecar.index; + self.chain + .verify_data_column_sidecar_for_gossip(sidecar, column_index) + }) + .collect::, _>>() + .unwrap(); + + if !verified_columns.is_empty() { + self.chain + .process_gossip_data_columns(verified_columns, || Ok(())) + .await + .unwrap(); + } + } else { + for (i, (kzg_proof, blob)) in proofs.into_iter().zip(blobs).enumerate() { + let sidecar = + Arc::new(BlobSidecar::new(i, blob.clone(), block, *kzg_proof).unwrap()); + let gossip_blob = GossipVerifiedBlob::new(sidecar, i as u64, &self.chain) + .expect("should obtain gossip verified blob"); + self.chain + .process_gossip_blob(gossip_blob) + .await + 
.expect("should import valid gossip verified blob"); + } + } + } } // Junk `Debug` impl to satistfy certain trait bounds during testing. @@ -3169,10 +3327,59 @@ pub fn generate_rand_block_and_data_columns( SignedBeaconBlock>, DataColumnSidecarList, ) { - let kzg = get_kzg(spec); - let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec); - let blob_refs = blobs.iter().map(|b| &b.blob).collect::>(); - let data_columns = blobs_to_data_column_sidecars(&blob_refs, &block, &kzg, spec).unwrap(); - + let (block, _blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec); + let data_columns = generate_data_column_sidecars_from_block(&block, spec); (block, data_columns) } + +/// Generate data column sidecars from pre-computed cells and proofs. +fn generate_data_column_sidecars_from_block( + block: &SignedBeaconBlock, + spec: &ChainSpec, +) -> DataColumnSidecarList { + let kzg_commitments = block.message().body().blob_kzg_commitments().unwrap(); + if kzg_commitments.is_empty() { + return vec![]; + } + + let kzg_commitments_inclusion_proof = block + .message() + .body() + .kzg_commitments_merkle_proof() + .unwrap(); + let signed_block_header = block.signed_block_header(); + + // load the precomputed column sidecar to avoid computing them for every block in the tests. + let template_data_columns = RuntimeVariableList::>::from_ssz_bytes( + TEST_DATA_COLUMN_SIDECARS_SSZ, + spec.number_of_columns as usize, + ) + .unwrap(); + + let (cells, proofs) = template_data_columns + .into_iter() + .map(|sidecar| { + let DataColumnSidecar { + column, kzg_proofs, .. 
+ } = sidecar; + // There's only one cell per column for a single blob + let cell_bytes: Vec = column.into_iter().next().unwrap().into(); + let kzg_cell = cell_bytes.try_into().unwrap(); + let kzg_proof = kzg_proofs.into_iter().next().unwrap(); + (kzg_cell, kzg_proof) + }) + .collect::<(Vec<_>, Vec<_>)>(); + + // Repeat the cells and proofs for every blob + let blob_cells_and_proofs_vec = + vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); kzg_commitments.len()]; + + build_data_column_sidecars( + kzg_commitments.clone(), + kzg_commitments_inclusion_proof, + signed_block_header, + blob_cells_and_proofs_vec, + spec, + ) + .unwrap() +} diff --git a/beacon_node/beacon_chain/src/test_utils/fixtures/test_data_column_sidecars.ssz b/beacon_node/beacon_chain/src/test_utils/fixtures/test_data_column_sidecars.ssz new file mode 100644 index 0000000000..112dd43b04 Binary files /dev/null and b/beacon_node/beacon_chain/src/test_utils/fixtures/test_data_column_sidecars.ssz differ diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 6000115993..621475a3ec 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -1,7 +1,6 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_simulator::produce_unaggregated_attestation; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; use beacon_chain::{metrics, StateSkipConfig, WhenSlotSkipped}; @@ -155,7 +154,6 @@ async fn produces_attestations() { .store .make_full_block(&block_root, blinded_block) .unwrap(); - let blobs = chain.get_blobs(&block_root).unwrap().blobs(); let epoch_boundary_slot = state .current_epoch() @@ -223,8 +221,7 @@ async fn produces_attestations() { assert_eq!(data.target.root, 
target_root, "bad target root"); let rpc_block = - RpcBlock::::new(None, Arc::new(block.clone()), blobs.clone()) - .unwrap(); + harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(block.clone())); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available( available_block, ) = chain @@ -296,14 +293,8 @@ async fn early_attester_cache_old_request() { .get_block(&head.beacon_block_root) .unwrap(); - let head_blobs = harness - .chain - .get_blobs(&head.beacon_block_root) - .expect("should get blobs") - .blobs(); - - let rpc_block = - RpcBlock::::new(None, head.beacon_block.clone(), head_blobs).unwrap(); + let rpc_block = harness + .build_rpc_block_from_store_blobs(Some(head.beacon_block_root), head.beacon_block.clone()); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) = harness .chain diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 1a651332ad..2a881b5b0f 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,6 +1,7 @@ #![cfg(not(debug_assertions))] use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; +use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::{ test_utils::{ test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, @@ -9,7 +10,7 @@ use beacon_chain::{ }; use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, - NotifyExecutionLayer, + InvalidSignature, NotifyExecutionLayer, }; use logging::test_logger; use slasher::{Config as SlasherConfig, Slasher}; @@ -34,7 +35,12 @@ const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGT static KEYPAIRS: LazyLock> = LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT)); -async fn get_chain_segment() -> 
(Vec>, Vec>>) { +enum DataSidecars { + Blobs(BlobSidecarList), + DataColumns(Vec>), +} + +async fn get_chain_segment() -> (Vec>, Vec>>) { let harness = get_harness(VALIDATOR_COUNT); harness @@ -46,7 +52,7 @@ async fn get_chain_segment() -> (Vec>, Vec (Vec>, Vec (Vec>, Vec>>) { - let harness = get_harness(VALIDATOR_COUNT); - - harness - .extend_chain( - CHAIN_SEGMENT_LENGTH, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - let mut segment = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); - let mut segment_blobs = Vec::with_capacity(CHAIN_SEGMENT_LENGTH); - for snapshot in harness - .chain - .chain_dump() - .expect("should dump chain") - .into_iter() - .skip(1) - { - let full_block = harness - .chain - .get_block(&snapshot.beacon_block_root) - .await - .unwrap() - .unwrap(); - segment.push(BeaconSnapshot { - beacon_block_root: snapshot.beacon_block_root, - beacon_block: Arc::new(full_block), - beacon_state: snapshot.beacon_state, - }); - let blob_sidecars = harness - .chain - .get_blobs(&snapshot.beacon_block_root) - .unwrap() - .blobs(); - segment_blobs.push(blob_sidecars) - } - (segment, segment_blobs) + (segment, segment_sidecars) } fn get_harness(validator_count: usize) -> BeaconChainHarness> { @@ -137,17 +119,35 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness], - blobs: &[Option>], + chain_segment_sidecars: &[Option>], + spec: &ChainSpec, ) -> Vec> { chain_segment .iter() - .zip(blobs.iter()) - .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + .zip(chain_segment_sidecars.iter()) + .map(|(snapshot, data_sidecars)| { + let block = snapshot.beacon_block.clone(); + build_rpc_block(block, data_sidecars, spec) }) .collect() } +fn build_rpc_block( + block: Arc>, + data_sidecars: &Option>, + spec: &ChainSpec, +) -> RpcBlock { + match data_sidecars { + Some(DataSidecars::Blobs(blobs)) => { + RpcBlock::new(None, block, Some(blobs.clone())).unwrap() + } + 
Some(DataSidecars::DataColumns(columns)) => { + RpcBlock::new_with_custody_columns(None, block, columns.clone(), spec).unwrap() + } + None => RpcBlock::new_without_blobs(None, block), + } +} + fn junk_signature() -> Signature { let kp = generate_deterministic_keypair(VALIDATOR_COUNT); let message = Hash256::from_slice(&[42; 32]); @@ -186,18 +186,22 @@ fn update_proposal_signatures( } } -fn update_parent_roots( - snapshots: &mut [BeaconSnapshot], - blobs: &mut [Option>], -) { +fn update_parent_roots(snapshots: &mut [BeaconSnapshot], blobs: &mut [Option>]) { for i in 0..snapshots.len() { let root = snapshots[i].beacon_block.canonical_root(); if let (Some(child), Some(child_blobs)) = (snapshots.get_mut(i + 1), blobs.get_mut(i + 1)) { let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature)); - if let Some(blobs) = child_blobs { - update_blob_signed_header(&new_child, blobs); + if let Some(data_sidecars) = child_blobs { + match data_sidecars { + DataSidecars::Blobs(blobs) => { + update_blob_signed_header(&new_child, blobs); + } + DataSidecars::DataColumns(columns) => { + update_data_column_signed_header(&new_child, columns); + } + } } child.beacon_block = new_child; } @@ -225,13 +229,36 @@ fn update_blob_signed_header( } } +fn update_data_column_signed_header( + signed_block: &SignedBeaconBlock, + data_columns: &mut Vec>, +) { + for old_custody_column_sidecar in data_columns.as_mut_slice() { + let old_column_sidecar = old_custody_column_sidecar.as_data_column(); + let new_column_sidecar = Arc::new(DataColumnSidecar:: { + index: old_column_sidecar.index, + column: old_column_sidecar.column.clone(), + kzg_commitments: old_column_sidecar.kzg_commitments.clone(), + kzg_proofs: old_column_sidecar.kzg_proofs.clone(), + signed_block_header: signed_block.signed_block_header(), + kzg_commitments_inclusion_proof: signed_block + .message() + .body() + 
.kzg_commitments_merkle_proof() + .unwrap(), + }); + *old_custody_column_sidecar = CustodyDataColumn::from_asserted_custody(new_column_sidecar); + } +} + #[tokio::test] async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; - let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); harness .chain @@ -267,9 +294,10 @@ async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { let harness = get_harness(VALIDATOR_COUNT); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; - let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); harness .chain @@ -308,9 +336,10 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a block removed. */ - let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let mut blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); blocks.remove(2); assert!( @@ -328,9 +357,10 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. 
*/ - let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let mut blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); @@ -365,9 +395,10 @@ async fn chain_segment_non_linear_slots() { * Test where a child is lower than the parent. */ - let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let mut blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = Slot::new(0); blocks[3] = RpcBlock::new_without_blobs( @@ -391,9 +422,10 @@ async fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. */ - let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); + let mut blocks: Vec> = + chain_segment_blocks(&chain_segment, &chain_segment_blobs, &harness.spec) + .into_iter() + .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); blocks[3] = RpcBlock::new_without_blobs( @@ -416,7 +448,7 @@ async fn chain_segment_non_linear_slots() { async fn assert_invalid_signature( chain_segment: &[BeaconSnapshot], - chain_segment_blobs: &[Option>], + chain_segment_blobs: &[Option>], harness: &BeaconChainHarness>, block_index: usize, snapshots: &[BeaconSnapshot], @@ -426,7 +458,7 @@ async fn assert_invalid_signature( .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect(); @@ -438,7 +470,7 @@ async fn 
assert_invalid_signature( .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), - Err(BlockError::InvalidSignature) + Err(BlockError::InvalidSignature(InvalidSignature::Unknown)) ), "should not import chain segment with an invalid {} signature", item @@ -453,7 +485,7 @@ async fn assert_invalid_signature( .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been @@ -468,19 +500,23 @@ async fn assert_invalid_signature( .chain .process_block( snapshots[block_index].beacon_block.canonical_root(), - RpcBlock::new( - None, + build_rpc_block( snapshots[block_index].beacon_block.clone(), - chain_segment_blobs[block_index].clone(), - ) - .unwrap(), + &chain_segment_blobs[block_index], + &harness.spec, + ), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), ) .await; assert!( - matches!(process_res, Err(BlockError::InvalidSignature)), + matches!( + process_res, + Err(BlockError::InvalidSignature( + InvalidSignature::BlockBodySignatures + )) + ), "should not import individual block with an invalid {} signature, got: {:?}", item, process_res @@ -526,7 +562,7 @@ async fn invalid_signature_gossip_block() { .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect(); harness @@ -536,21 +572,25 @@ async fn invalid_signature_gossip_block() { .into_block_error() .expect("should import all blocks prior to the one being tested"); let signed_block = SignedBeaconBlock::from_block(block, junk_signature()); + let process_res = harness + .chain + .process_block( + signed_block.canonical_root(), + 
Arc::new(signed_block), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ) + .await; assert!( matches!( - harness - .chain - .process_block( - signed_block.canonical_root(), - Arc::new(signed_block), - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ) - .await, - Err(BlockError::InvalidSignature) + process_res, + Err(BlockError::InvalidSignature( + InvalidSignature::ProposerSignature + )) ), - "should not import individual block with an invalid gossip signature", + "should not import individual block with an invalid gossip signature, got: {:?}", + process_res ); } } @@ -574,20 +614,22 @@ async fn invalid_signature_block_proposal() { .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect::>(); // Ensure the block will be rejected if imported in a chain segment. + let process_res = harness + .chain + .process_chain_segment(blocks, NotifyExecutionLayer::Yes) + .await + .into_block_error(); assert!( matches!( - harness - .chain - .process_chain_segment(blocks, NotifyExecutionLayer::Yes) - .await - .into_block_error(), - Err(BlockError::InvalidSignature) + process_res, + Err(BlockError::InvalidSignature(InvalidSignature::Unknown)) ), - "should not import chain segment with an invalid block signature", + "should not import chain segment with an invalid block signature, got: {:?}", + process_res ); } } @@ -880,7 +922,7 @@ async fn invalid_signature_deposit() { .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() + build_rpc_block(snapshot.beacon_block.clone(), blobs, &harness.spec) }) .collect(); assert!( @@ -890,7 +932,7 @@ async fn invalid_signature_deposit() { .process_chain_segment(blocks, NotifyExecutionLayer::Yes) .await .into_block_error(), - 
Err(BlockError::InvalidSignature) + Err(BlockError::InvalidSignature(InvalidSignature::Unknown)) ), "should not throw an invalid signature error for a bad deposit signature" ); @@ -946,7 +988,7 @@ fn unwrap_err(result: Result) -> U { #[tokio::test] async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); - let (chain_segment, chain_segment_blobs) = get_chain_segment_with_blob_sidecars().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; @@ -958,7 +1000,7 @@ async fn block_gossip_verification() { // Import the ancestors prior to the block we're testing. for (snapshot, blobs_opt) in chain_segment[0..block_index] .iter() - .zip(chain_segment_blobs.iter()) + .zip(chain_segment_blobs.into_iter()) { let gossip_verified = harness .chain @@ -977,20 +1019,8 @@ async fn block_gossip_verification() { ) .await .expect("should import valid gossip verified block"); - if let Some(blob_sidecars) = blobs_opt { - for blob_sidecar in blob_sidecars { - let blob_index = blob_sidecar.index; - let gossip_verified = harness - .chain - .verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index) - .expect("should obtain gossip verified blob"); - - harness - .chain - .process_gossip_blob(gossip_verified) - .await - .expect("should import valid gossip verified blob"); - } + if let Some(data_sidecars) = blobs_opt { + verify_and_process_gossip_data_sidecars(&harness, data_sidecars).await; } } @@ -1086,7 +1116,7 @@ async fn block_gossip_verification() { ))) .await ), - BlockError::ProposalSignatureInvalid + BlockError::InvalidSignature(InvalidSignature::ProposerSignature) ), "should not import a block with an invalid proposal signature" ); @@ -1218,6 +1248,51 @@ async fn block_gossip_verification() { ); } +async fn verify_and_process_gossip_data_sidecars( + harness: &BeaconChainHarness>, + data_sidecars: DataSidecars, +) { + match data_sidecars { + DataSidecars::Blobs(blob_sidecars) => { + for 
blob_sidecar in blob_sidecars { + let blob_index = blob_sidecar.index; + let gossip_verified = harness + .chain + .verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index) + .expect("should obtain gossip verified blob"); + + harness + .chain + .process_gossip_blob(gossip_verified) + .await + .expect("should import valid gossip verified blob"); + } + } + DataSidecars::DataColumns(column_sidecars) => { + let gossip_verified = column_sidecars + .into_iter() + .map(|column_sidecar| { + let subnet_id = DataColumnSubnetId::from_column_index( + column_sidecar.index(), + &harness.spec, + ); + harness.chain.verify_data_column_sidecar_for_gossip( + column_sidecar.into_inner(), + *subnet_id, + ) + }) + .collect::, _>>() + .expect("should obtain gossip verified columns"); + + harness + .chain + .process_gossip_data_columns(gossip_verified, || Ok(())) + .await + .expect("should import valid gossip verified columns"); + } + } +} + #[tokio::test] async fn verify_block_for_gossip_slashing_detection() { let slasher_dir = tempdir().unwrap(); @@ -1248,20 +1323,14 @@ async fn verify_block_for_gossip_slashing_detection() { let verified_block = harness.chain.verify_block_for_gossip(block1).await.unwrap(); if let Some((kzg_proofs, blobs)) = blobs1 { - let sidecars = - BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs, &spec).unwrap(); - for sidecar in sidecars { - let blob_index = sidecar.index; - let verified_blob = harness - .chain - .verify_blob_sidecar_for_gossip(sidecar, blob_index) - .unwrap(); - harness - .chain - .process_gossip_blob(verified_blob) - .await - .unwrap(); - } + harness + .process_gossip_blobs_or_columns( + verified_block.block(), + blobs.iter(), + kzg_proofs.iter(), + None, + ) + .await; } harness .chain diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index df0d561e1c..44fb298d6c 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ 
b/beacon_node/beacon_chain/tests/op_verification.rs @@ -14,7 +14,8 @@ use state_processing::per_block_processing::errors::{ AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid, }; use std::sync::{Arc, LazyLock}; -use store::{LevelDB, StoreConfig}; +use store::database::interface::BeaconNodeBackend; +use store::StoreConfig; use tempfile::{tempdir, TempDir}; use types::*; @@ -26,7 +27,7 @@ static KEYPAIRS: LazyLock> = type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; -type HotColdDB = store::HotColdDB, LevelDB>; +type HotColdDB = store::HotColdDB, BeaconNodeBackend>; fn get_store(db_path: &TempDir) -> Arc { let spec = Arc::new(test_spec::()); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index be7045c54a..41e6467b0f 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -36,6 +36,38 @@ fn get_harness(spec: ChainSpec) -> BeaconChainHarness> { .keypairs(KEYPAIRS.to_vec()) .fresh_ephemeral_store() .chain_config(chain_config) + .mock_execution_layer() + .build(); + + harness.advance_slot(); + + harness +} + +fn get_electra_harness(spec: ChainSpec) -> BeaconChainHarness> { + let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..Default::default() + }; + + let spec = Arc::new(spec); + + let harness = BeaconChainHarness::builder(E::default()) + .spec(spec.clone()) + .keypairs(KEYPAIRS.to_vec()) + .with_genesis_state_builder(|builder| { + builder.set_initial_balance_fn(Box::new(move |i| { + // Use a variety of balances between min activation balance and max effective balance. 
+ let balance = spec.max_effective_balance_electra + / (i as u64 + 1) + / spec.effective_balance_increment + * spec.effective_balance_increment; + balance.max(spec.min_activation_balance) + })) + }) + .fresh_ephemeral_store() + .chain_config(chain_config) + .mock_execution_layer() .build(); harness.advance_slot(); @@ -560,6 +592,83 @@ async fn test_rewards_altair_inactivity_leak_justification_epoch() { assert_eq!(expected_balances, balances); } +#[tokio::test] +async fn test_rewards_electra() { + let spec = ForkName::Electra.make_genesis_spec(E::default_spec()); + let harness = get_electra_harness(spec.clone()); + let target_epoch = 0; + + // advance until epoch N + 1 and get initial balances + harness + .extend_slots((E::slots_per_epoch() * (target_epoch + 1)) as usize) + .await; + let mut expected_balances = harness.get_current_state().balances().to_vec(); + + // advance until epoch N + 2 and build proposal rewards map + let mut proposal_rewards_map = HashMap::new(); + let mut sync_committee_rewards_map = HashMap::new(); + for _ in 0..E::slots_per_epoch() { + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + // calculate beacon block rewards / penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + harness.make_block_return_pre_state(state, slot).await; + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward(signed_block.message(), &mut state) + .unwrap(); + + let total_proposer_reward = proposal_rewards_map + .entry(beacon_block_reward.proposer_index) + .or_insert(0); + *total_proposer_reward += beacon_block_reward.total as i64; + + // calculate sync committee rewards / penalties + let reward_payload = harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + + for reward in reward_payload { + let total_sync_reward = sync_committee_rewards_map + .entry(reward.validator_index) + .or_insert(0); + *total_sync_reward += reward.reward; + } + + 
harness.extend_slots(1).await; + } + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert ideal rewards are greater than 0 + assert_eq!( + ideal_rewards.len() as u64, + spec.max_effective_balance_electra / spec.effective_balance_increment + ); + assert!(ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + + // apply attestation, proposal, and sync committee rewards and penalties to initial balances + apply_attestation_rewards(&mut expected_balances, total_rewards); + apply_other_rewards(&mut expected_balances, &proposal_rewards_map); + apply_other_rewards(&mut expected_balances, &sync_committee_rewards_map); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().to_vec(); + + assert_eq!(expected_balances, balances); +} + #[tokio::test] async fn test_rewards_base_subset_only() { let spec = ForkName::Base.make_genesis_spec(E::default_spec()); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 60d46e8269..7a2df76970 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1,7 +1,6 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_verification::Error as AttnError; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; @@ -25,10 +24,11 @@ use std::collections::HashSet; use std::convert::TryInto; use std::sync::{Arc, LazyLock}; use std::time::Duration; +use store::database::interface::BeaconNodeBackend; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION, 
STATE_UPPER_LIMIT_NO_RETAIN}; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, - BlobInfo, DBColumn, HotColdDB, LevelDB, StoreConfig, + BlobInfo, DBColumn, HotColdDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; use tokio::time::sleep; @@ -46,15 +46,19 @@ static KEYPAIRS: LazyLock> = type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; -fn get_store(db_path: &TempDir) -> Arc, LevelDB>> { - get_store_generic(db_path, StoreConfig::default(), test_spec::()) +fn get_store(db_path: &TempDir) -> Arc, BeaconNodeBackend>> { + let store_config = StoreConfig { + prune_payloads: false, + ..StoreConfig::default() + }; + get_store_generic(db_path, store_config, test_spec::()) } fn get_store_generic( db_path: &TempDir, config: StoreConfig, spec: ChainSpec, -) -> Arc, LevelDB>> { +) -> Arc, BeaconNodeBackend>> { let hot_path = db_path.path().join("chain_db"); let cold_path = db_path.path().join("freezer_db"); let blobs_path = db_path.path().join("blobs_db"); @@ -73,7 +77,7 @@ fn get_store_generic( } fn get_harness( - store: Arc, LevelDB>>, + store: Arc, BeaconNodeBackend>>, validator_count: usize, ) -> TestHarness { // Most tests expect to retain historic states, so we use this as the default. @@ -81,13 +85,26 @@ fn get_harness( reconstruct_historic_states: true, ..ChainConfig::default() }; - get_harness_generic(store, validator_count, chain_config) + get_harness_generic(store, validator_count, chain_config, false) +} + +fn get_harness_import_all_data_columns( + store: Arc, BeaconNodeBackend>>, + validator_count: usize, +) -> TestHarness { + // Most tests expect to retain historic states, so we use this as the default. 
+ let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }; + get_harness_generic(store, validator_count, chain_config, true) } fn get_harness_generic( - store: Arc, LevelDB>>, + store: Arc, BeaconNodeBackend>>, validator_count: usize, chain_config: ChainConfig, + import_all_data_columns: bool, ) -> TestHarness { let harness = TestHarness::builder(MinimalEthSpec) .spec(store.get_chain_spec().clone()) @@ -96,6 +113,7 @@ fn get_harness_generic( .fresh_disk_store(store) .mock_execution_layer() .chain_config(chain_config) + .import_all_data_columns(import_all_data_columns) .build(); harness.advance_slot(); harness @@ -244,7 +262,6 @@ async fn full_participation_no_skips() { AttestationStrategy::AllValidators, ) .await; - check_finalization(&harness, num_blocks_produced); check_split_slot(&harness, store); check_chain_dump(&harness, num_blocks_produced + 1); @@ -2286,7 +2303,12 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let temp1 = tempdir().unwrap(); let full_store = get_store(&temp1); - let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT); + + // TODO(das): Run a supernode so the node has full blobs stored. 
+ // This may not be required in the future if we end up implementing downloading checkpoint + // blobs from p2p peers: + // https://github.com/sigp/lighthouse/issues/6837 + let harness = get_harness_import_all_data_columns(full_store.clone(), LOW_VALIDATOR_COUNT); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); @@ -2319,10 +2341,8 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .unwrap(); let wss_blobs_opt = harness .chain - .store - .get_blobs(&wss_block_root) - .unwrap() - .blobs(); + .get_or_reconstruct_blobs(&wss_block_root) + .unwrap(); let wss_state = full_store .get_state(&wss_state_root, Some(checkpoint_slot)) .unwrap() @@ -2395,14 +2415,16 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); + // This test may break in the future if we no longer store the full checkpoint data columns. let store_wss_blobs_opt = beacon_chain - .store - .get_blobs(&wss_block_root) - .unwrap() - .blobs(); + .get_or_reconstruct_blobs(&wss_block_root) + .unwrap(); assert_eq!(store_wss_block, wss_block); - assert_eq!(store_wss_blobs_opt, wss_blobs_opt); + // TODO(fulu): Remove this condition once #6760 (PeerDAS checkpoint sync) is merged. + if !beacon_chain.spec.is_peer_das_scheduled() { + assert_eq!(store_wss_blobs_opt, wss_blobs_opt); + } // Apply blocks forward to reach head. 
let chain_dump = harness.chain.chain_dump().unwrap(); @@ -2418,7 +2440,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); + let slot = full_block.slot(); let state_root = full_block.state_root(); @@ -2426,7 +2448,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { beacon_chain .process_block( full_block.canonical_root(), - RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), + harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -2480,13 +2502,12 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .expect("should get block") .expect("should get block"); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); if let MaybeAvailableBlock::Available(block) = harness .chain .data_availability_checker .verify_kzg_for_rpc_block( - RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), + harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), ) .expect("should verify kzg") { @@ -2554,6 +2575,15 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { if block_root != prev_block_root { assert_eq!(block.slot(), slot); } + + // Prune_payloads is set to false in the default config, so the payload should exist + if block.message().execution_payload().is_ok() { + assert!(beacon_chain + .store + .execution_payload_exists(&block_root) + .unwrap(),); + } + prev_block_root = block_root; } @@ -2587,7 +2617,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { reconstruct_historic_states: false, ..ChainConfig::default() }; - let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config); + let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, 
chain_config, false); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); @@ -3075,6 +3105,10 @@ async fn deneb_prune_blobs_happy_case() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); + if store.get_chain_spec().is_peer_das_scheduled() { + // TODO(fulu): add prune tests for Fulu / PeerDAS data columns. + return; + } let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { // No-op prior to Deneb. return; @@ -3122,6 +3156,10 @@ async fn deneb_prune_blobs_no_finalization() { let db_path = tempdir().unwrap(); let store = get_store(&db_path); + if store.get_chain_spec().is_peer_das_scheduled() { + // TODO(fulu): add prune tests for Fulu / PeerDAS data columns. + return; + } let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { // No-op prior to Deneb. return; @@ -3266,6 +3304,10 @@ async fn deneb_prune_blobs_margin_test(margin: u64) { let db_path = tempdir().unwrap(); let store = get_store_generic(&db_path, config, test_spec::()); + if store.get_chain_spec().is_peer_das_scheduled() { + // TODO(fulu): add prune tests for Fulu / PeerDAS data columns. + return; + } let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else { // No-op prior to Deneb. return; @@ -3508,7 +3550,10 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { } /// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch. -fn check_split_slot(harness: &TestHarness, store: Arc, LevelDB>>) { +fn check_split_slot( + harness: &TestHarness, + store: Arc, BeaconNodeBackend>>, +) { let split_slot = store.get_split_slot(); assert_eq!( harness @@ -3526,7 +3571,6 @@ fn check_split_slot(harness: &TestHarness, store: Arc, L /// Check that all the states in a chain dump have the correct tree hash. 
fn check_chain_dump(harness: &TestHarness, expected_len: u64) { let mut chain_dump = harness.chain.chain_dump().unwrap(); - let split_slot = harness.chain.store.get_split_slot(); assert_eq!(chain_dump.len() as u64, expected_len); @@ -3553,13 +3597,12 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) { // Check presence of execution payload on disk. if harness.chain.spec.bellatrix_fork_epoch.is_some() { - assert_eq!( + assert!( harness .chain .store .execution_payload_exists(&checkpoint.beacon_block_root) .unwrap(), - checkpoint.beacon_block.slot() >= split_slot, "incorrect payload storage for block at slot {}: {:?}", checkpoint.beacon_block.slot(), checkpoint.beacon_block_root, diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 0edda2f95b..2743f93bb3 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -62,9 +62,9 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc; use tokio::sync::mpsc::error::TrySendError; use types::{ - Attestation, BeaconState, ChainSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, SubnetId, + Attestation, BeaconState, ChainSpec, EthSpec, Hash256, RelativeEpoch, SignedAggregateAndProof, + SingleAttestation, Slot, SubnetId, }; -use types::{EthSpec, Slot}; use work_reprocessing_queue::{ spawn_reprocess_scheduler, QueuedAggregate, QueuedLightClientUpdate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork, @@ -504,10 +504,10 @@ impl From for WorkEvent { /// Items required to verify a batch of unaggregated gossip attestations. 
#[derive(Debug)] -pub struct GossipAttestationPackage { +pub struct GossipAttestationPackage { pub message_id: MessageId, pub peer_id: PeerId, - pub attestation: Box>, + pub attestation: Box, pub subnet_id: SubnetId, pub should_import: bool, pub seen_timestamp: Duration, @@ -549,21 +549,32 @@ pub enum BlockingOrAsync { Blocking(BlockingFn), Async(AsyncFn), } +pub type GossipAttestationBatch = Vec>>; /// Indicates the type of work to be performed and therefore its priority and /// queuing specifics. pub enum Work { GossipAttestation { - attestation: Box>, - process_individual: Box) + Send + Sync>, - process_batch: Box>) + Send + Sync>, + attestation: Box>>, + process_individual: Box>) + Send + Sync>, + process_batch: Box) + Send + Sync>, + }, + // Attestation requiring conversion before processing. + // + // For now this is a `SingleAttestation`, but eventually we will switch this around so that + // legacy `Attestation`s are converted and the main processing pipeline operates on + // `SingleAttestation`s. + GossipAttestationToConvert { + attestation: Box>, + process_individual: + Box) + Send + Sync>, }, UnknownBlockAttestation { process_fn: BlockingFn, }, GossipAttestationBatch { - attestations: Vec>, - process_batch: Box>) + Send + Sync>, + attestations: GossipAttestationBatch, + process_batch: Box) + Send + Sync>, }, GossipAggregate { aggregate: Box>, @@ -639,6 +650,7 @@ impl fmt::Debug for Work { #[strum(serialize_all = "snake_case")] pub enum WorkType { GossipAttestation, + GossipAttestationToConvert, UnknownBlockAttestation, GossipAttestationBatch, GossipAggregate, @@ -690,6 +702,7 @@ impl Work { fn to_type(&self) -> WorkType { match self { Work::GossipAttestation { .. } => WorkType::GossipAttestation, + Work::GossipAttestationToConvert { .. } => WorkType::GossipAttestationToConvert, Work::GossipAttestationBatch { .. } => WorkType::GossipAttestationBatch, Work::GossipAggregate { .. } => WorkType::GossipAggregate, Work::GossipAggregateBatch { .. 
} => WorkType::GossipAggregateBatch, @@ -849,6 +862,7 @@ impl BeaconProcessor { let mut aggregate_queue = LifoQueue::new(queue_lengths.aggregate_queue); let mut aggregate_debounce = TimeLatch::default(); let mut attestation_queue = LifoQueue::new(queue_lengths.attestation_queue); + let mut attestation_to_convert_queue = LifoQueue::new(queue_lengths.attestation_queue); let mut attestation_debounce = TimeLatch::default(); let mut unknown_block_aggregate_queue = LifoQueue::new(queue_lengths.unknown_block_aggregate_queue); @@ -1180,6 +1194,9 @@ impl BeaconProcessor { None } } + // Convert any gossip attestations that need to be converted. + } else if let Some(item) = attestation_to_convert_queue.pop() { + Some(item) // Check sync committee messages after attestations as their rewards are lesser // and they don't influence fork choice. } else if let Some(item) = sync_contribution_queue.pop() { @@ -1301,6 +1318,9 @@ impl BeaconProcessor { match work { _ if can_spawn => self.spawn_worker(work, idle_tx), Work::GossipAttestation { .. } => attestation_queue.push(work), + Work::GossipAttestationToConvert { .. } => { + attestation_to_convert_queue.push(work) + } // Attestation batches are formed internally within the // `BeaconProcessor`, they are not sent from external services. Work::GossipAttestationBatch { .. 
} => crit!( @@ -1430,7 +1450,8 @@ impl BeaconProcessor { if let Some(modified_queue_id) = modified_queue_id { let queue_len = match modified_queue_id { - WorkType::GossipAttestation => aggregate_queue.len(), + WorkType::GossipAttestation => attestation_queue.len(), + WorkType::GossipAttestationToConvert => attestation_to_convert_queue.len(), WorkType::UnknownBlockAttestation => unknown_block_attestation_queue.len(), WorkType::GossipAttestationBatch => 0, // No queue WorkType::GossipAggregate => aggregate_queue.len(), @@ -1563,6 +1584,12 @@ impl BeaconProcessor { } => task_spawner.spawn_blocking(move || { process_individual(*attestation); }), + Work::GossipAttestationToConvert { + attestation, + process_individual, + } => task_spawner.spawn_blocking(move || { + process_individual(*attestation); + }), Work::GossipAttestationBatch { attestations, process_batch, @@ -1717,7 +1744,7 @@ mod tests { #[test] fn min_queue_len() { // State with no validators. - let spec = ForkName::latest().make_genesis_spec(ChainSpec::mainnet()); + let spec = ForkName::latest_stable().make_genesis_spec(ChainSpec::mainnet()); let genesis_time = 0; let state = BeaconState::::new(genesis_time, Eth1Data::default(), &spec); assert_eq!(state.validators().len(), 0); diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 3531e81c84..1920bd0ebb 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -6,7 +6,9 @@ authors = ["Sean Anderson "] [dependencies] eth2 = { workspace = true } +ethereum_ssz = { workspace = true } lighthouse_version = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 91ee00a65f..5f64ac7e43 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,16 +1,24 @@ 
use eth2::types::builder_bid::SignedBuilderBid; +use eth2::types::fork_versioned_response::EmptyMetadata; use eth2::types::{ - EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes, - SignedValidatorRegistrationData, Slot, + ContentType, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, ForkVersionDeserialize, + ForkVersionedResponse, PublicKeyBytes, SignedValidatorRegistrationData, Slot, }; use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock}; pub use eth2::Error; -use eth2::{ok_or_error, StatusCode, CONSENSUS_VERSION_HEADER}; -use reqwest::header::{HeaderMap, HeaderValue}; +use eth2::{ + ok_or_error, StatusCode, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, + JSON_CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER, +}; +use reqwest::header::{HeaderMap, HeaderValue, ACCEPT}; use reqwest::{IntoUrl, Response}; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde::Serialize; +use ssz::Encode; +use std::str::FromStr; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; use std::time::Duration; pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; @@ -49,6 +57,7 @@ pub struct BuilderHttpClient { server: SensitiveUrl, timeouts: Timeouts, user_agent: String, + ssz_enabled: Arc, } impl BuilderHttpClient { @@ -64,6 +73,7 @@ impl BuilderHttpClient { server, timeouts: Timeouts::new(builder_header_timeout), user_agent, + ssz_enabled: Arc::new(false.into()), }) } @@ -71,6 +81,78 @@ impl BuilderHttpClient { &self.user_agent } + fn fork_name_from_header(&self, headers: &HeaderMap) -> Result, String> { + headers + .get(CONSENSUS_VERSION_HEADER) + .map(|fork_name| { + fork_name + .to_str() + .map_err(|e| e.to_string()) + .and_then(ForkName::from_str) + }) + .transpose() + } + + fn content_type_from_header(&self, headers: &HeaderMap) -> ContentType { + let Some(content_type) = headers.get(CONTENT_TYPE_HEADER).map(|content_type| { + let content_type = content_type.to_str(); + match content_type { + 
Ok(SSZ_CONTENT_TYPE_HEADER) => ContentType::Ssz, + _ => ContentType::Json, + } + }) else { + return ContentType::Json; + }; + content_type + } + + async fn get_with_header< + T: DeserializeOwned + ForkVersionDecode + ForkVersionDeserialize, + U: IntoUrl, + >( + &self, + url: U, + timeout: Duration, + headers: HeaderMap, + ) -> Result, Error> { + let response = self + .get_response_with_header(url, Some(timeout), headers) + .await?; + + let headers = response.headers().clone(); + let response_bytes = response.bytes().await?; + + let Ok(Some(fork_name)) = self.fork_name_from_header(&headers) else { + // if no fork version specified, attempt to fallback to JSON + self.ssz_enabled.store(false, Ordering::SeqCst); + return serde_json::from_slice(&response_bytes).map_err(Error::InvalidJson); + }; + + let content_type = self.content_type_from_header(&headers); + + match content_type { + ContentType::Ssz => { + self.ssz_enabled.store(true, Ordering::SeqCst); + T::from_ssz_bytes_by_fork(&response_bytes, fork_name) + .map(|data| ForkVersionedResponse { + version: Some(fork_name), + metadata: EmptyMetadata {}, + data, + }) + .map_err(Error::InvalidSsz) + } + ContentType::Json => { + self.ssz_enabled.store(false, Ordering::SeqCst); + serde_json::from_slice(&response_bytes).map_err(Error::InvalidJson) + } + } + } + + /// Return `true` if the most recently received response from the builder had SSZ Content-Type. + pub fn is_ssz_enabled(&self) -> bool { + self.ssz_enabled.load(Ordering::SeqCst) + } + async fn get_with_timeout( &self, url: U, @@ -83,6 +165,21 @@ impl BuilderHttpClient { .map_err(Into::into) } + /// Perform a HTTP GET request, returning the `Response` for further processing. 
+ async fn get_response_with_header( + &self, + url: U, + timeout: Option, + headers: HeaderMap, + ) -> Result { + let mut builder = self.client.get(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.headers(headers).send().await.map_err(Error::from)?; + ok_or_error(response).await + } + /// Perform a HTTP GET request, returning the `Response` for further processing. async fn get_response_with_timeout( &self, @@ -112,6 +209,32 @@ impl BuilderHttpClient { ok_or_error(response).await } + async fn post_ssz_with_raw_response( + &self, + url: U, + ssz_body: Vec, + mut headers: HeaderMap, + timeout: Option, + ) -> Result { + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + + headers.insert( + CONTENT_TYPE_HEADER, + HeaderValue::from_static(SSZ_CONTENT_TYPE_HEADER), + ); + + let response = builder + .headers(headers) + .body(ssz_body) + .send() + .await + .map_err(Error::from)?; + ok_or_error(response).await + } + async fn post_with_raw_response( &self, url: U, @@ -152,6 +275,42 @@ impl BuilderHttpClient { Ok(()) } + /// `POST /eth/v1/builder/blinded_blocks` with SSZ serialized request body + pub async fn post_builder_blinded_blocks_ssz( + &self, + blinded_block: &SignedBlindedBeaconBlock, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + let body = blinded_block.as_ssz_bytes(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("eth") + .push("v1") + .push("builder") + .push("blinded_blocks"); + + let mut headers = HeaderMap::new(); + if let Ok(value) = HeaderValue::from_str(&blinded_block.fork_name_unchecked().to_string()) { + headers.insert(CONSENSUS_VERSION_HEADER, value); + } + + let result = self + .post_ssz_with_raw_response( + path, + body, + headers, + Some(self.timeouts.post_blinded_blocks), + ) + .await? 
+ .bytes() + .await?; + + FullPayloadContents::from_ssz_bytes_by_fork(&result, blinded_block.fork_name_unchecked()) + .map_err(Error::InvalidSsz) + } + /// `POST /eth/v1/builder/blinded_blocks` pub async fn post_builder_blinded_blocks( &self, @@ -202,7 +361,17 @@ impl BuilderHttpClient { .push(format!("{parent_hash:?}").as_str()) .push(pubkey.as_hex_string().as_str()); - let resp = self.get_with_timeout(path, self.timeouts.get_header).await; + let mut headers = HeaderMap::new(); + if let Ok(ssz_content_type_header) = HeaderValue::from_str(&format!( + "{}; q=1.0,{}; q=0.9", + SSZ_CONTENT_TYPE_HEADER, JSON_CONTENT_TYPE_HEADER + )) { + headers.insert(ACCEPT, ssz_content_type_header); + }; + + let resp = self + .get_with_header(path, self.timeouts.get_header, headers) + .await; if matches!(resp, Err(Error::StatusCode(StatusCode::NO_CONTENT))) { Ok(None) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 1cd9e89b96..e3bfd60a48 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -14,7 +14,7 @@ use beacon_chain::{ eth1_chain::{CachingEth1Backend, Eth1Chain}, slot_clock::{SlotClock, SystemTimeSlotClock}, state_advance_timer::spawn_state_advance_timer, - store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, + store::{HotColdDB, ItemStore, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, }; use beacon_chain::{Kzg, LightClientProducerEvent}; @@ -41,6 +41,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; +use store::database::interface::BeaconNodeBackend; use timer::spawn_timer; use tokio::sync::oneshot; use types::{ @@ -1030,7 +1031,7 @@ where } impl - ClientBuilder, LevelDB>> + ClientBuilder, BeaconNodeBackend>> where TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, diff --git a/beacon_node/execution_layer/src/engine_api/http.rs 
b/beacon_node/execution_layer/src/engine_api/http.rs index daf2bf6ed4..747383754a 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -829,7 +829,8 @@ impl HttpJsonRpc { Ok(response.into()) } - pub async fn new_payload_v5_fulu( + // TODO(fulu): switch to v5 endpoint when the EL is ready for Fulu + pub async fn new_payload_v4_fulu( &self, new_payload_request_fulu: NewPayloadRequestFulu<'_, E>, ) -> Result { @@ -844,7 +845,7 @@ impl HttpJsonRpc { let response: JsonPayloadStatusV1 = self .rpc_request( - ENGINE_NEW_PAYLOAD_V5, + ENGINE_NEW_PAYLOAD_V4, params, ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, ) @@ -962,6 +963,19 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } + // TODO(fulu): remove when v5 method is ready. + ForkName::Fulu => { + let response: JsonGetPayloadResponseV5 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V4, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + JsonGetPayloadResponse::V5(response) + .try_into() + .map_err(Error::BadResponse) + } _ => Err(Error::UnsupportedForkVariant(format!( "called get_payload_v4 with {}", fork_name @@ -1263,10 +1277,11 @@ impl HttpJsonRpc { } } NewPayloadRequest::Fulu(new_payload_request_fulu) => { - if engine_capabilities.new_payload_v5 { - self.new_payload_v5_fulu(new_payload_request_fulu).await + // TODO(fulu): switch to v5 endpoint when the EL is ready for Fulu + if engine_capabilities.new_payload_v4 { + self.new_payload_v4_fulu(new_payload_request_fulu).await } else { - Err(Error::RequiredMethodUnsupported("engine_newPayloadV5")) + Err(Error::RequiredMethodUnsupported("engine_newPayloadV4")) } } } @@ -1305,8 +1320,9 @@ impl HttpJsonRpc { } } ForkName::Fulu => { - if engine_capabilities.get_payload_v5 { - self.get_payload_v5(fork_name, payload_id).await + // TODO(fulu): switch to v5 when the EL is ready + if engine_capabilities.get_payload_v4 { + 
self.get_payload_v4(fork_name, payload_id).await } else { Err(Error::RequiredMethodUnsupported("engine_getPayloadv5")) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 95b4b50925..96615297d8 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -991,3 +991,154 @@ impl TryFrom for ClientVersionV1 { }) } } + +#[cfg(test)] +mod tests { + use ssz::Encode; + use types::{ + ConsolidationRequest, DepositRequest, MainnetEthSpec, PublicKeyBytes, RequestType, + SignatureBytes, WithdrawalRequest, + }; + + use super::*; + + fn create_request_string(prefix: u8, request_bytes: &T) -> String { + format!( + "0x{:02x}{}", + prefix, + hex::encode(request_bytes.as_ssz_bytes()) + ) + } + + /// Tests all error conditions except ssz decoding errors + /// + /// *** + /// Elements of the list MUST be ordered by request_type in ascending order. + /// Elements with empty request_data MUST be excluded from the list. + /// If any element is out of order, has a length of 1-byte or shorter, + /// or more than one element has the same type byte, client software MUST return -32602: Invalid params error. 
+ /// *** + #[test] + fn test_invalid_execution_requests() { + let deposit_request = DepositRequest { + pubkey: PublicKeyBytes::empty(), + withdrawal_credentials: Hash256::random(), + amount: 32, + signature: SignatureBytes::empty(), + index: 0, + }; + + let consolidation_request = ConsolidationRequest { + source_address: Address::random(), + source_pubkey: PublicKeyBytes::empty(), + target_pubkey: PublicKeyBytes::empty(), + }; + + let withdrawal_request = WithdrawalRequest { + amount: 32, + source_address: Address::random(), + validator_pubkey: PublicKeyBytes::empty(), + }; + + // First check a valid request with all requests + assert!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + create_request_string(RequestType::Withdrawal.to_u8(), &withdrawal_request), + create_request_string(RequestType::Consolidation.to_u8(), &consolidation_request), + ])) + .is_ok() + ); + + // Single requests + assert!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + ])) + .is_ok() + ); + + assert!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Withdrawal.to_u8(), &withdrawal_request), + ])) + .is_ok() + ); + + assert!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Consolidation.to_u8(), &consolidation_request), + ])) + .is_ok() + ); + + // Out of order + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Withdrawal.to_u8(), &withdrawal_request), + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + ])) + .unwrap_err(), + RequestsError::InvalidOrdering + )); + + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Consolidation.to_u8(), &consolidation_request), + 
create_request_string(RequestType::Withdrawal.to_u8(), &withdrawal_request), + ])) + .unwrap_err(), + RequestsError::InvalidOrdering + )); + + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Consolidation.to_u8(), &consolidation_request), + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + ])) + .unwrap_err(), + RequestsError::InvalidOrdering + )); + + // Multiple requests of same type + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + ])) + .unwrap_err(), + RequestsError::InvalidOrdering + )); + + // Invalid prefix + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(42, &deposit_request), + ])) + .unwrap_err(), + RequestsError::InvalidPrefix(42) + )); + + // Prefix followed by no data + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + create_request_string( + RequestType::Consolidation.to_u8(), + &Vec::::new() + ), + ])) + .unwrap_err(), + RequestsError::EmptyRequest(1) + )); + // Empty request + assert!(matches!( + ExecutionRequests::::try_from(JsonExecutionRequests(vec![ + create_request_string(RequestType::Deposit.to_u8(), &deposit_request), + "0x".to_string() + ])) + .unwrap_err(), + RequestsError::EmptyRequest(1) + )); + } +} diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f7abe73543..6e5e4fca01 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -121,8 +121,7 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { payload: ExecutionPayloadHeader::Fulu(builder_bid.header).into(), @@ -159,6 +158,7 @@ pub enum Error { }, 
ZeroLengthTransaction, PayloadBodiesByRangeNotSupported, + GetBlobsNotSupported, InvalidJWTSecret(String), InvalidForkForPayload, InvalidPayloadBody(String), @@ -330,7 +330,7 @@ impl> BlockProposalContents { pub parent_hash: ExecutionBlockHash, pub parent_gas_limit: u64, @@ -1872,7 +1872,7 @@ impl ExecutionLayer { .map_err(Box::new) .map_err(Error::EngineError) } else { - Ok(vec![None; query.len()]) + Err(Error::GetBlobsNotSupported) } } @@ -1901,11 +1901,18 @@ impl ExecutionLayer { if let Some(builder) = self.builder() { let (payload_result, duration) = timed_future(metrics::POST_BLINDED_PAYLOAD_BUILDER, async { - builder - .post_builder_blinded_blocks(block) - .await - .map_err(Error::Builder) - .map(|d| d.data) + if builder.is_ssz_enabled() { + builder + .post_builder_blinded_blocks_ssz(block) + .await + .map_err(Error::Builder) + } else { + builder + .post_builder_blinded_blocks(block) + .await + .map_err(Error::Builder) + .map(|d| d.data) + } }) .await; diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 0babb9d1a3..d727d2c159 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -230,7 +230,8 @@ pub async fn handle_rpc( if method == ENGINE_NEW_PAYLOAD_V1 || method == ENGINE_NEW_PAYLOAD_V2 || method == ENGINE_NEW_PAYLOAD_V3 - || method == ENGINE_NEW_PAYLOAD_V4 + // TODO(fulu): Uncomment this once v5 method is ready for Fulu + // || method == ENGINE_NEW_PAYLOAD_V4 { return Err(( format!("{} called after Fulu fork!", method), @@ -264,15 +265,16 @@ pub async fn handle_rpc( GENERIC_ERROR_CODE, )); } - if matches!(request, JsonExecutionPayload::V4(_)) { - return Err(( - format!( - "{} called with `ExecutionPayloadV4` after Fulu fork!", - method - ), - GENERIC_ERROR_CODE, - )); - } + // TODO(fulu): remove once we switch to v5 + // if matches!(request, JsonExecutionPayload::V4(_)) { + // return Err(( + // 
format!( + // "{} called with `ExecutionPayloadV4` after Fulu fork!", + // method + // ), + // GENERIC_ERROR_CODE, + // )); + // } } _ => unreachable!(), }; @@ -381,8 +383,9 @@ pub async fn handle_rpc( == ForkName::Fulu && (method == ENGINE_GET_PAYLOAD_V1 || method == ENGINE_GET_PAYLOAD_V2 - || method == ENGINE_GET_PAYLOAD_V3 - || method == ENGINE_GET_PAYLOAD_V4) + || method == ENGINE_GET_PAYLOAD_V3) + // TODO(fulu): Uncomment this once v5 method is ready for Fulu + // || method == ENGINE_GET_PAYLOAD_V4) { return Err(( format!("{} called after Fulu fork!", method), @@ -448,6 +451,22 @@ pub async fn handle_rpc( }) .unwrap() } + // TODO(fulu): remove this once we switch to v5 method + JsonExecutionPayload::V5(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV5 { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V5 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + execution_requests: Default::default(), + }) + .unwrap() + } _ => unreachable!(), }), ENGINE_GET_PAYLOAD_V5 => Ok(match JsonExecutionPayload::from(response) { diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 65181dcf4f..f07ee7ac6f 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,10 +1,20 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; -use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId}; -use eth2::{BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER}; +use bytes::Bytes; +use eth2::types::PublishBlockRequest; +use eth2::types::{ + BlobsBundle, BlockId, BroadcastValidation, EventKind, EventTopic, FullPayloadContents, + ProposerData, 
StateId, ValidatorId, +}; +use eth2::{ + BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, + SSZ_CONTENT_TYPE_HEADER, +}; use fork_choice::ForkchoiceUpdateParameters; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; +use slog::{debug, error, info, warn, Logger}; +use ssz::Encode; use std::collections::HashMap; use std::fmt::Debug; use std::future::Future; @@ -13,20 +23,27 @@ use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tempfile::NamedTempFile; +use tokio_stream::StreamExt; use tree_hash::TreeHash; use types::builder_bid::{ BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, BuilderBidFulu, SignedBuilderBid, }; use types::{ - Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, ExecutionRequests, FixedBytesExtended, ForkName, + Address, BeaconState, ChainSpec, Epoch, EthSpec, ExecPayload, ExecutionPayload, + ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; use types::{ExecutionBlockHash, SecretKey}; +use warp::reply::{self, Reply}; use warp::{Filter, Rejection}; +pub const DEFAULT_FEE_RECIPIENT: Address = Address::repeat_byte(42); +pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; +pub const DEFAULT_BUILDER_PRIVATE_KEY: &str = + "607a11b45a7219cc61a3d9c5fd08c7eebd602a6a19a977f8d3771d5711a550f2"; + #[derive(Clone)] pub enum Operation { FeeRecipient(Address), @@ -259,6 +276,17 @@ impl BidStuff for BuilderBid { } } +// Non referenced version of `PayloadParameters` +#[derive(Clone)] +pub struct PayloadParametersCloned { + pub parent_hash: ExecutionBlockHash, + pub parent_gas_limit: u64, + pub proposer_gas_limit: Option, + pub payload_attributes: PayloadAttributes, + pub forkchoice_update_params: ForkchoiceUpdateParameters, + pub 
current_fork: ForkName, +} + #[derive(Clone)] pub struct MockBuilder { el: ExecutionLayer, @@ -268,6 +296,20 @@ pub struct MockBuilder { builder_sk: SecretKey, operations: Arc>>, invalidate_signatures: Arc>, + genesis_time: Option, + /// Only returns bids for registered validators if set to true. `true` by default. + validate_pubkey: bool, + /// Do not apply any operations if set to `false`. + /// Applying operations might modify the cached header in the execution layer. + /// Use this if you want get_header to return a valid bid that can be eventually submitted as + /// a valid block. + apply_operations: bool, + payload_id_cache: Arc>>, + /// If set to `true`, sets the bid returned by `get_header` to Uint256::MAX + max_bid: bool, + /// A cache that stores the proposers index for a given epoch + proposers_cache: Arc>>>, + log: Logger, } impl MockBuilder { @@ -295,7 +337,12 @@ impl MockBuilder { let builder = MockBuilder::new( el, BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), + true, + true, + false, spec, + None, + executor.log().clone(), ); let host: Ipv4Addr = Ipv4Addr::LOCALHOST; let port = 0; @@ -303,21 +350,47 @@ impl MockBuilder { (builder, server) } + #[allow(clippy::too_many_arguments)] pub fn new( el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, + validate_pubkey: bool, + apply_operations: bool, + max_bid: bool, spec: Arc, + sk: Option<&[u8]>, + log: Logger, ) -> Self { - let sk = SecretKey::random(); + let builder_sk = if let Some(sk_bytes) = sk { + match SecretKey::deserialize(sk_bytes) { + Ok(sk) => sk, + Err(_) => { + error!( + log, + "Invalid sk_bytes provided, generating random secret key" + ); + SecretKey::random() + } + } + } else { + SecretKey::deserialize(&hex::decode(DEFAULT_BUILDER_PRIVATE_KEY).unwrap()).unwrap() + }; Self { el, beacon_client, // Should keep spec and context consistent somehow spec, val_registration_cache: Arc::new(RwLock::new(HashMap::new())), - builder_sk: sk, + builder_sk, + 
validate_pubkey, operations: Arc::new(RwLock::new(vec![])), invalidate_signatures: Arc::new(RwLock::new(false)), + payload_id_cache: Arc::new(RwLock::new(HashMap::new())), + proposers_cache: Arc::new(RwLock::new(HashMap::new())), + apply_operations, + max_bid, + genesis_time: None, + log, } } @@ -342,8 +415,523 @@ impl MockBuilder { } bid.stamp_payload(); } + + /// Return the public key of the builder + pub fn public_key(&self) -> PublicKeyBytes { + self.builder_sk.public_key().compress() + } + + pub async fn register_validators( + &self, + registrations: Vec, + ) -> Result<(), String> { + info!( + self.log, + "Registering validators"; + "count" => registrations.len(), + ); + for registration in registrations { + if !registration.verify_signature(&self.spec) { + error!( + self.log, + "Failed to register validator"; + "error" => "invalid signature", + "validator" => %registration.message.pubkey + ); + return Err("invalid signature".to_string()); + } + self.val_registration_cache + .write() + .insert(registration.message.pubkey, registration); + } + Ok(()) + } + + pub async fn submit_blinded_block( + &self, + block: SignedBlindedBeaconBlock, + ) -> Result, String> { + let root = match &block { + SignedBlindedBeaconBlock::Base(_) | types::SignedBeaconBlock::Altair(_) => { + return Err("invalid fork".to_string()); + } + SignedBlindedBeaconBlock::Bellatrix(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Capella(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Deneb(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Electra(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Fulu(block) => { + block.message.body.execution_payload.tree_hash_root() + } + }; + info!( + self.log, + "Submitting blinded beacon block to builder"; + "block_hash" => %root + ); + let payload = self + 
.el + .get_payload_by_root(&root) + .ok_or_else(|| "missing payload for tx root".to_string())?; + + let (payload, blobs) = payload.deconstruct(); + let full_block = block + .try_into_full_block(Some(payload.clone())) + .ok_or("Internal error, just provided a payload")?; + debug!( + self.log, + "Got full payload, sending to local beacon node for propagation"; + "txs_count" => payload.transactions().len(), + "blob_count" => blobs.as_ref().map(|b| b.commitments.len()) + ); + let publish_block_request = PublishBlockRequest::new( + Arc::new(full_block), + blobs.clone().map(|b| (b.proofs, b.blobs)), + ); + self.beacon_client + .post_beacon_blocks_v2(&publish_block_request, Some(BroadcastValidation::Gossip)) + .await + .map_err(|e| format!("Failed to post blinded block {:?}", e))?; + Ok(FullPayloadContents::new(payload, blobs)) + } + + pub async fn get_header( + &self, + slot: Slot, + parent_hash: ExecutionBlockHash, + pubkey: PublicKeyBytes, + ) -> Result, String> { + info!(self.log, "In get_header"); + // Check if the pubkey has registered with the builder if required + if self.validate_pubkey && !self.val_registration_cache.read().contains_key(&pubkey) { + return Err("validator not registered with builder".to_string()); + } + let payload_parameters = { + let mut guard = self.payload_id_cache.write(); + guard.remove(&parent_hash) + }; + + let payload_parameters = match payload_parameters { + Some(params) => params, + None => { + warn!( + self.log, + "Payload params not cached for parent_hash {}", parent_hash + ); + self.get_payload_params(slot, None, pubkey, None).await? 
+ } + }; + + info!(self.log, "Got payload params"); + + let fork = self.fork_name_at_slot(slot); + let payload_response_type = self + .el + .get_full_payload_caching(PayloadParameters { + parent_hash: payload_parameters.parent_hash, + parent_gas_limit: payload_parameters.parent_gas_limit, + proposer_gas_limit: payload_parameters.proposer_gas_limit, + payload_attributes: &payload_parameters.payload_attributes, + forkchoice_update_params: &payload_parameters.forkchoice_update_params, + current_fork: payload_parameters.current_fork, + }) + .await + .map_err(|e| format!("couldn't get payload {:?}", e))?; + + info!(self.log, "Got payload message, fork {}", fork); + + let mut message = match payload_response_type { + crate::GetPayloadResponseType::Full(payload_response) => { + #[allow(clippy::type_complexity)] + let (payload, value, maybe_blobs_bundle, maybe_requests): ( + ExecutionPayload, + Uint256, + Option>, + Option>, + ) = payload_response.into(); + + match fork { + ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { + header: payload + .as_fulu() + .map_err(|_| "incorrect payload variant".to_string())? + .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + execution_requests: maybe_requests.unwrap_or_default(), + }), + ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { + header: payload + .as_electra() + .map_err(|_| "incorrect payload variant".to_string())? + .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + execution_requests: maybe_requests.unwrap_or_default(), + }), + ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { + header: payload + .as_deneb() + .map_err(|_| "incorrect payload variant".to_string())? 
+ .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + }), + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { + header: payload + .as_capella() + .map_err(|_| "incorrect payload variant".to_string())? + .into(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + }), + ForkName::Bellatrix => BuilderBid::Bellatrix(BuilderBidBellatrix { + header: payload + .as_bellatrix() + .map_err(|_| "incorrect payload variant".to_string())? + .into(), + value: self.get_bid_value(value), + pubkey: self.builder_sk.public_key().compress(), + }), + ForkName::Base | ForkName::Altair => return Err("invalid fork".to_string()), + } + } + _ => panic!("just requested full payload, cannot get blinded"), + }; + + if self.apply_operations { + info!(self.log, "Applying operations"); + self.apply_operations(&mut message); + } + info!(self.log, "Signing builder message"); + + let mut signature = message.sign_builder_message(&self.builder_sk, &self.spec); + + if *self.invalidate_signatures.read() { + signature = Signature::empty(); + }; + let signed_bid = SignedBuilderBid { message, signature }; + info!(self.log, "Builder bid {:?}", &signed_bid.message.value()); + Ok(signed_bid) + } + + fn fork_name_at_slot(&self, slot: Slot) -> ForkName { + self.spec.fork_name_at_slot::(slot) + } + + fn get_bid_value(&self, value: Uint256) -> Uint256 { + if self.max_bid { + Uint256::MAX + } else if !self.apply_operations { + value + } else { + Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI) + } + } + + /// Prepare the execution layer for payload creation every slot for the correct + /// proposer index + pub async fn prepare_execution_layer(&self) -> Result<(), String> { + info!( + self.log, + "Starting a task to prepare the execution layer"; + ); + let mut head_event_stream = self + .beacon_client + 
.get_events::(&[EventTopic::Head]) + .await + .map_err(|e| format!("Failed to get head event {:?}", e))?; + + while let Some(Ok(event)) = head_event_stream.next().await { + match event { + EventKind::Head(head) => { + debug!( + self.log, + "Got a new head event"; + "block_hash" => %head.block + ); + let next_slot = head.slot + 1; + // Find the next proposer index from the cached data or through a beacon api call + let epoch = next_slot.epoch(E::slots_per_epoch()); + let position_in_slot = next_slot.as_u64() % E::slots_per_epoch(); + let proposer_data = { + let proposers_opt = { + let proposers_cache = self.proposers_cache.read(); + proposers_cache.get(&epoch).cloned() + }; + match proposers_opt { + Some(proposers) => proposers + .get(position_in_slot as usize) + .expect("position in slot is max epoch size") + .clone(), + None => { + // make a call to the beacon api and populate the cache + let duties: Vec<_> = self + .beacon_client + .get_validator_duties_proposer(epoch) + .await + .map_err(|e| { + format!( + "Failed to get proposer duties for epoch: {}, {:?}", + epoch, e + ) + })? 
+ .data; + let proposer_data = duties + .get(position_in_slot as usize) + .expect("position in slot is max epoch size") + .clone(); + self.proposers_cache.write().insert(epoch, duties); + proposer_data + } + } + }; + self.prepare_execution_layer_internal( + head.slot, + head.block, + proposer_data.validator_index, + proposer_data.pubkey, + ) + .await?; + } + e => { + warn!( + self.log, + "Got an unexpected event"; + "event" => %e.topic_name() + ); + } + } + } + Ok(()) + } + + async fn prepare_execution_layer_internal( + &self, + current_slot: Slot, + head_block_root: Hash256, + validator_index: u64, + pubkey: PublicKeyBytes, + ) -> Result<(), String> { + let next_slot = current_slot + 1; + let payload_parameters = self + .get_payload_params( + next_slot, + Some(head_block_root), + pubkey, + Some(validator_index), + ) + .await?; + + self.payload_id_cache + .write() + .insert(payload_parameters.parent_hash, payload_parameters); + Ok(()) + } + + /// Get the `PayloadParameters` for requesting an ExecutionPayload for `slot` + /// for the given `validator_index` and `pubkey`. + async fn get_payload_params( + &self, + slot: Slot, + head_block_root: Option, + pubkey: PublicKeyBytes, + validator_index: Option, + ) -> Result { + let fork = self.fork_name_at_slot(slot); + + let block_id = match head_block_root { + Some(block_root) => BlockId::Root(block_root), + None => BlockId::Head, + }; + let head = self + .beacon_client + .get_beacon_blocks::(block_id) + .await + .map_err(|_| "couldn't get head".to_string())? + .ok_or_else(|| "missing head block".to_string())? 
+ .data; + + let head_block_root = head_block_root.unwrap_or(head.canonical_root()); + + let head_execution_payload = head + .message() + .body() + .execution_payload() + .map_err(|_| "pre-merge block".to_string())?; + let head_execution_hash = head_execution_payload.block_hash(); + let head_gas_limit = head_execution_payload.gas_limit(); + + let finalized_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Finalized) + .await + .map_err(|_| "couldn't get finalized block".to_string())? + .ok_or_else(|| "missing finalized block".to_string())? + .data + .message() + .body() + .execution_payload() + .map_err(|_| "pre-merge block".to_string())? + .block_hash(); + + let justified_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Justified) + .await + .map_err(|_| "couldn't get justified block".to_string())? + .ok_or_else(|| "missing justified block".to_string())? + .data + .message() + .body() + .execution_payload() + .map_err(|_| "pre-merge block".to_string())? + .block_hash(); + + let (fee_recipient, proposer_gas_limit) = + match self.val_registration_cache.read().get(&pubkey) { + Some(cached_data) => ( + cached_data.message.fee_recipient, + cached_data.message.gas_limit, + ), + None => { + warn!( + self.log, + "Validator not registered {}, using default fee recipient and gas limits", + pubkey + ); + (DEFAULT_FEE_RECIPIENT, DEFAULT_GAS_LIMIT) + } + }; + let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); + + let genesis_time = if let Some(genesis_time) = self.genesis_time { + genesis_time + } else { + self.beacon_client + .get_beacon_genesis() + .await + .map_err(|_| "couldn't get beacon genesis".to_string())? + .data + .genesis_time + }; + let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; + + let head_state: BeaconState = self + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(|_| "couldn't get state".to_string())? 
+ .ok_or_else(|| "missing state".to_string())? + .data; + + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(|_| "couldn't get prev randao".to_string())?; + + let expected_withdrawals = if fork.capella_enabled() { + Some( + self.beacon_client + .get_expected_withdrawals(&StateId::Head) + .await + .map_err(|e| format!("Failed to get expected withdrawals: {:?}", e))? + .data, + ) + } else { + None + }; + + let payload_attributes = match fork { + // the withdrawals root is filled in by operations, but we supply the valid withdrawals + // first to avoid polluting the execution block generator with invalid payload attributes + // NOTE: this was part of an effort to add payload attribute uniqueness checks, + // which was abandoned because it broke too many tests in subtle ways. + ForkName::Bellatrix | ForkName::Capella => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + None, + ), + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + Some(head_block_root), + ), + ForkName::Base | ForkName::Altair => { + return Err("invalid fork".to_string()); + } + }; + + // Tells the execution layer that the `validator_index` is expected to propose + // a block on top of `head_block_root` for the given slot + let val_index = validator_index.unwrap_or( + self.beacon_client + .get_beacon_states_validator_id(StateId::Head, &ValidatorId::PublicKey(pubkey)) + .await + .map_err(|_| "couldn't get validator".to_string())? + .ok_or_else(|| "missing validator".to_string())? 
+ .data + .index, + ); + + self.el + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) + .await; + + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_hash: Some(head_execution_hash), + finalized_hash: Some(finalized_execution_hash), + justified_hash: Some(justified_execution_hash), + head_root: head_block_root, + }; + + let _status = self + .el + .notify_forkchoice_updated( + head_execution_hash, + justified_execution_hash, + finalized_execution_hash, + slot - 1, + head_block_root, + ) + .await + .map_err(|e| format!("fcu call failed : {:?}", e))?; + + let payload_parameters = PayloadParametersCloned { + parent_hash: head_execution_hash, + parent_gas_limit: head_gas_limit, + proposer_gas_limit: Some(proposer_gas_limit), + payload_attributes, + forkchoice_update_params, + current_fork: fork, + }; + Ok(payload_parameters) + } } +/// Serve the builder api using warp. Uses the functions defined in `MockBuilder` to serve +/// the requests. +/// +/// We should eventually move this to axum when we move everything else. 
pub fn serve( listen_addr: Ipv4Addr, listen_port: u16, @@ -362,17 +950,41 @@ pub fn serve( .and(warp::path::end()) .and(ctx_filter.clone()) .and_then( - |registrations: Vec, builder: MockBuilder| async move { - for registration in registrations { - if !registration.verify_signature(&builder.spec) { - return Err(reject("invalid signature")); - } - builder - .val_registration_cache - .write() - .insert(registration.message.pubkey, registration); - } - Ok(warp::reply()) + |registrations: Vec, + builder: MockBuilder| async move { + builder + .register_validators(registrations) + .await + .map_err(|e| warp::reject::custom(Custom(e)))?; + Ok::<_, Rejection>(warp::reply()) + }, + ) + .boxed(); + + let blinded_block_ssz = prefix + .and(warp::path("blinded_blocks")) + .and(warp::body::bytes()) + .and(warp::header::header::(CONSENSUS_VERSION_HEADER)) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |block_bytes: Bytes, fork_name: ForkName, builder: MockBuilder| async move { + let block = + SignedBlindedBeaconBlock::::from_ssz_bytes_by_fork(&block_bytes, fork_name) + .map_err(|e| warp::reject::custom(Custom(format!("{:?}", e))))?; + let payload = builder + .submit_blinded_block(block) + .await + .map_err(|e| warp::reject::custom(Custom(e)))?; + + Ok::<_, warp::reject::Rejection>( + warp::http::Response::builder() + .status(200) + .body(payload.as_ssz_bytes()) + .map(add_ssz_content_type_header) + .map(|res| add_consensus_version_header(res, fork_name)) + .unwrap(), + ) }, ); @@ -387,30 +999,10 @@ pub fn serve( |block: SignedBlindedBeaconBlock, fork_name: ForkName, builder: MockBuilder| async move { - let root = match block { - SignedBlindedBeaconBlock::Base(_) | types::SignedBeaconBlock::Altair(_) => { - return Err(reject("invalid fork")); - } - SignedBlindedBeaconBlock::Bellatrix(block) => { - block.message.body.execution_payload.tree_hash_root() - } - SignedBlindedBeaconBlock::Capella(block) => { - block.message.body.execution_payload.tree_hash_root() - } 
- SignedBlindedBeaconBlock::Deneb(block) => { - block.message.body.execution_payload.tree_hash_root() - } - SignedBlindedBeaconBlock::Electra(block) => { - block.message.body.execution_payload.tree_hash_root() - } - SignedBlindedBeaconBlock::Fulu(block) => { - block.message.body.execution_payload.tree_hash_root() - } - }; let payload = builder - .el - .get_payload_by_root(&root) - .ok_or_else(|| reject("missing payload for tx root"))?; + .submit_blinded_block(block) + .await + .map_err(|e| warp::reject::custom(Custom(e)))?; let resp: ForkVersionedResponse<_> = ForkVersionedResponse { version: Some(fork_name), metadata: Default::default(), @@ -448,328 +1040,47 @@ pub fn serve( ) .and(warp::path::end()) .and(ctx_filter.clone()) + .and(warp::header::optional::("accept")) .and_then( |slot: Slot, parent_hash: ExecutionBlockHash, pubkey: PublicKeyBytes, - builder: MockBuilder| async move { - let fork = builder.spec.fork_name_at_slot::(slot); - let signed_cached_data = builder - .val_registration_cache - .read() - .get(&pubkey) - .ok_or_else(|| reject("missing registration"))? - .clone(); - let cached_data = signed_cached_data.message; - - let head = builder - .beacon_client - .get_beacon_blocks::(BlockId::Head) + builder: MockBuilder, + accept_header: Option| async move { + let fork_name = builder.fork_name_at_slot(slot); + let signed_bid = builder + .get_header(slot, parent_hash, pubkey) .await - .map_err(|_| reject("couldn't get head"))? 
- .ok_or_else(|| reject("missing head block"))?; - - let block = head.data.message(); - let head_block_root = block.tree_hash_root(); - let head_execution_payload = block - .body() - .execution_payload() - .map_err(|_| reject("pre-merge block"))?; - let head_execution_hash = head_execution_payload.block_hash(); - let head_gas_limit = head_execution_payload.gas_limit(); - if head_execution_hash != parent_hash { - return Err(reject("head mismatch")); - } - - let finalized_execution_hash = builder - .beacon_client - .get_beacon_blocks::(BlockId::Finalized) - .await - .map_err(|_| reject("couldn't get finalized block"))? - .ok_or_else(|| reject("missing finalized block"))? - .data - .message() - .body() - .execution_payload() - .map_err(|_| reject("pre-merge block"))? - .block_hash(); - - let justified_execution_hash = builder - .beacon_client - .get_beacon_blocks::(BlockId::Justified) - .await - .map_err(|_| reject("couldn't get justified block"))? - .ok_or_else(|| reject("missing justified block"))? - .data - .message() - .body() - .execution_payload() - .map_err(|_| reject("pre-merge block"))? - .block_hash(); - - let val_index = builder - .beacon_client - .get_beacon_states_validator_id(StateId::Head, &ValidatorId::PublicKey(pubkey)) - .await - .map_err(|_| reject("couldn't get validator"))? - .ok_or_else(|| reject("missing validator"))? - .data - .index; - let fee_recipient = cached_data.fee_recipient; - let slots_since_genesis = slot.as_u64() - builder.spec.genesis_slot.as_u64(); - - let genesis_data = builder - .beacon_client - .get_beacon_genesis() - .await - .map_err(|_| reject("couldn't get beacon genesis"))? - .data; - let genesis_time = genesis_data.genesis_time; - let timestamp = - (slots_since_genesis * builder.spec.seconds_per_slot) + genesis_time; - - let head_state: BeaconState = builder - .beacon_client - .get_debug_beacon_states(StateId::Head) - .await - .map_err(|_| reject("couldn't get state"))? - .ok_or_else(|| reject("missing state"))? 
- .data; - let prev_randao = head_state - .get_randao_mix(head_state.current_epoch()) - .map_err(|_| reject("couldn't get prev randao"))?; - - let expected_withdrawals = if fork.capella_enabled() { - Some( - builder - .beacon_client - .get_expected_withdrawals(&StateId::Head) - .await - .unwrap() - .data, - ) - } else { - None - }; - - let payload_attributes = match fork { - // the withdrawals root is filled in by operations, but we supply the valid withdrawals - // first to avoid polluting the execution block generator with invalid payload attributes - // NOTE: this was part of an effort to add payload attribute uniqueness checks, - // which was abandoned because it broke too many tests in subtle ways. - ForkName::Bellatrix | ForkName::Capella => PayloadAttributes::new( - timestamp, - *prev_randao, - fee_recipient, - expected_withdrawals, - None, + .map_err(|e| warp::reject::custom(Custom(e)))?; + let accept_header = accept_header.unwrap_or(eth2::types::Accept::Any); + match accept_header { + eth2::types::Accept::Ssz => Ok::<_, Rejection>( + warp::http::Response::builder() + .status(200) + .body(signed_bid.as_ssz_bytes()) + .map(add_ssz_content_type_header) + .map(|res| add_consensus_version_header(res, fork_name)) + .unwrap(), ), - ForkName::Deneb | ForkName::Electra | ForkName::Fulu => PayloadAttributes::new( - timestamp, - *prev_randao, - fee_recipient, - expected_withdrawals, - Some(head_block_root), - ), - ForkName::Base | ForkName::Altair => { - return Err(reject("invalid fork")); + eth2::types::Accept::Json | eth2::types::Accept::Any => { + let resp: ForkVersionedResponse<_> = ForkVersionedResponse { + version: Some(fork_name), + metadata: Default::default(), + data: signed_bid, + }; + Ok::<_, Rejection>(warp::reply::json(&resp).into_response()) } - }; - - builder - .el - .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) - .await; - - let forkchoice_update_params = ForkchoiceUpdateParameters { - head_root: Hash256::zero(), - 
head_hash: None, - justified_hash: Some(justified_execution_hash), - finalized_hash: Some(finalized_execution_hash), - }; - - let proposer_gas_limit = builder - .val_registration_cache - .read() - .get(&pubkey) - .map(|v| v.message.gas_limit); - - let payload_parameters = PayloadParameters { - parent_hash: head_execution_hash, - parent_gas_limit: head_gas_limit, - proposer_gas_limit, - payload_attributes: &payload_attributes, - forkchoice_update_params: &forkchoice_update_params, - current_fork: fork, - }; - - let payload_response_type = builder - .el - .get_full_payload_caching(payload_parameters) - .await - .map_err(|_| reject("couldn't get payload"))?; - - let mut message = match payload_response_type { - crate::GetPayloadResponseType::Full(payload_response) => { - #[allow(clippy::type_complexity)] - let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( - ExecutionPayload, - Uint256, - Option>, - Option>, - ) = payload_response.into(); - - match fork { - ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { - header: payload - .as_fulu() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { - header: payload - .as_electra() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { - header: payload - .as_deneb() - .map_err(|_| reject("incorrect payload variant"))? 
- .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { - header: payload - .as_capella() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Bellatrix => BuilderBid::Bellatrix(BuilderBidBellatrix { - header: payload - .as_bellatrix() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Base | ForkName::Altair => { - return Err(reject("invalid fork")) - } - } - } - crate::GetPayloadResponseType::Blinded(payload_response) => { - #[allow(clippy::type_complexity)] - let (payload, _block_value, maybe_blobs_bundle, _maybe_requests): ( - ExecutionPayload, - Uint256, - Option>, - Option>, - ) = payload_response.into(); - match fork { - ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { - header: payload - .as_fulu() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { - header: payload - .as_electra() - .map_err(|_| reject("incorrect payload variant"))? 
- .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb { - header: payload - .as_deneb() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) - .unwrap_or_default(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { - header: payload - .as_capella() - .map_err(|_| reject("incorrect payload variant"))? - .into(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Bellatrix => BuilderBid::Bellatrix(BuilderBidBellatrix { - header: payload - .as_bellatrix() - .map_err(|_| reject("incorrect payload variant"))? 
- .into(), - value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), - pubkey: builder.builder_sk.public_key().compress(), - }), - ForkName::Base | ForkName::Altair => { - return Err(reject("invalid fork")) - } - } - } - }; - - builder.apply_operations(&mut message); - - let mut signature = - message.sign_builder_message(&builder.builder_sk, &builder.spec); - - if *builder.invalidate_signatures.read() { - signature = Signature::empty(); } - - let fork_name = builder - .spec - .fork_name_at_epoch(slot.epoch(E::slots_per_epoch())); - let signed_bid = SignedBuilderBid { message, signature }; - let resp: ForkVersionedResponse<_> = ForkVersionedResponse { - version: Some(fork_name), - metadata: Default::default(), - data: signed_bid, - }; - let json_bid = serde_json::to_string(&resp) - .map_err(|_| reject("coudn't serialize signed bid"))?; - Ok::<_, Rejection>( - warp::http::Response::builder() - .status(200) - .body(json_bid) - .unwrap(), - ) }, ); let routes = warp::post() - .and(validators.or(blinded_block)) + // Routes which expect `application/octet-stream` go within this `and`. + .and( + warp::header::exact(CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER) + .and(blinded_block_ssz), + ) + .or(validators.or(blinded_block)) .or(warp::get().and(status).or(header)) .map(|reply| warp::reply::with_header(reply, "Server", "lighthouse-mock-builder-server")); @@ -782,3 +1093,13 @@ pub fn serve( fn reject(msg: &'static str) -> Rejection { warp::reject::custom(Custom(msg.to_string())) } + +/// Add the 'Content-Type application/octet-stream` header to a response. +fn add_ssz_content_type_header(reply: T) -> warp::reply::Response { + reply::with_header(reply, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER).into_response() +} + +/// Add the `Eth-Consensus-Version` header to a response. 
+fn add_consensus_version_header(reply: T, fork_name: ForkName) -> warp::reply::Response { + reply::with_header(reply, CONSENSUS_VERSION_HEADER, fork_name.to_string()).into_response() +} diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index 90c4ad6e66..4fccc0393b 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -24,10 +24,134 @@ fn eth1_withdrawal_credentials(pubkey: &PublicKey, spec: &ChainSpec) -> Hash256 Hash256::from_slice(&credentials) } +pub type WithdrawalCredentialsFn = + Box Fn(usize, &'a PublicKey, &'a ChainSpec) -> Hash256>; + /// Builds a genesis state as defined by the Eth2 interop procedure (see below). /// /// Reference: /// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start +#[derive(Default)] +pub struct InteropGenesisBuilder { + /// Mapping from validator index to initial balance for each validator. + /// + /// If `None`, then the default balance of 32 ETH will be used. + initial_balance_fn: Option u64>>, + + /// Mapping from validator index and pubkey to withdrawal credentials for each validator. + /// + /// If `None`, then default BLS withdrawal credentials will be used. + withdrawal_credentials_fn: Option, + + /// The execution payload header to embed in the genesis state. 
+ execution_payload_header: Option>, +} + +impl InteropGenesisBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn set_initial_balance_fn(mut self, initial_balance_fn: Box u64>) -> Self { + self.initial_balance_fn = Some(initial_balance_fn); + self + } + + pub fn set_withdrawal_credentials_fn( + mut self, + withdrawal_credentials_fn: WithdrawalCredentialsFn, + ) -> Self { + self.withdrawal_credentials_fn = Some(withdrawal_credentials_fn); + self + } + + pub fn set_alternating_eth1_withdrawal_credentials(self) -> Self { + self.set_withdrawal_credentials_fn(Box::new(alternating_eth1_withdrawal_credentials_fn)) + } + + pub fn set_execution_payload_header( + self, + execution_payload_header: ExecutionPayloadHeader, + ) -> Self { + self.set_opt_execution_payload_header(Some(execution_payload_header)) + } + + pub fn set_opt_execution_payload_header( + mut self, + execution_payload_header: Option>, + ) -> Self { + self.execution_payload_header = execution_payload_header; + self + } + + pub fn build_genesis_state( + self, + keypairs: &[Keypair], + genesis_time: u64, + eth1_block_hash: Hash256, + spec: &ChainSpec, + ) -> Result, String> { + // Generate withdrawal credentials using provided function, or default BLS. + let withdrawal_credentials_fn = self.withdrawal_credentials_fn.unwrap_or_else(|| { + Box::new(|_, pubkey, spec| bls_withdrawal_credentials(pubkey, spec)) + }); + + let withdrawal_credentials = keypairs + .iter() + .map(|key| &key.pk) + .enumerate() + .map(|(i, pubkey)| withdrawal_credentials_fn(i, pubkey, spec)) + .collect::>(); + + // Generate initial balances. 
+ let initial_balance_fn = self + .initial_balance_fn + .unwrap_or_else(|| Box::new(|_| spec.max_effective_balance)); + + let eth1_timestamp = 2_u64.pow(40); + + let initial_balances = (0..keypairs.len()) + .map(initial_balance_fn) + .collect::>(); + + let datas = keypairs + .into_par_iter() + .zip(withdrawal_credentials.into_par_iter()) + .zip(initial_balances.into_par_iter()) + .map(|((keypair, withdrawal_credentials), amount)| { + let mut data = DepositData { + withdrawal_credentials, + pubkey: keypair.pk.clone().into(), + amount, + signature: Signature::empty().into(), + }; + + data.signature = data.create_signature(&keypair.sk, spec); + + data + }) + .collect::>(); + + let mut state = initialize_beacon_state_from_eth1( + eth1_block_hash, + eth1_timestamp, + genesis_deposits(datas, spec)?, + self.execution_payload_header, + spec, + ) + .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; + + *state.genesis_time_mut() = genesis_time; + + // Invalidate all the caches after all the manual state surgery. 
+ state + .drop_all_caches() + .map_err(|e| format!("Unable to drop caches: {:?}", e))?; + + Ok(state) + } +} + pub fn interop_genesis_state( keypairs: &[Keypair], genesis_time: u64, @@ -35,18 +159,21 @@ pub fn interop_genesis_state( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { - let withdrawal_credentials = keypairs - .iter() - .map(|keypair| bls_withdrawal_credentials(&keypair.pk, spec)) - .collect::>(); - interop_genesis_state_with_withdrawal_credentials::( - keypairs, - &withdrawal_credentials, - genesis_time, - eth1_block_hash, - execution_payload_header, - spec, - ) + InteropGenesisBuilder::new() + .set_opt_execution_payload_header(execution_payload_header) + .build_genesis_state(keypairs, genesis_time, eth1_block_hash, spec) +} + +fn alternating_eth1_withdrawal_credentials_fn<'a>( + index: usize, + pubkey: &'a PublicKey, + spec: &'a ChainSpec, +) -> Hash256 { + if index % 2usize == 0usize { + bls_withdrawal_credentials(pubkey, spec) + } else { + eth1_withdrawal_credentials(pubkey, spec) + } } // returns an interop genesis state except every other @@ -58,80 +185,10 @@ pub fn interop_genesis_state_with_eth1( execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { - let withdrawal_credentials = keypairs - .iter() - .enumerate() - .map(|(index, keypair)| { - if index % 2 == 0 { - bls_withdrawal_credentials(&keypair.pk, spec) - } else { - eth1_withdrawal_credentials(&keypair.pk, spec) - } - }) - .collect::>(); - interop_genesis_state_with_withdrawal_credentials::( - keypairs, - &withdrawal_credentials, - genesis_time, - eth1_block_hash, - execution_payload_header, - spec, - ) -} - -pub fn interop_genesis_state_with_withdrawal_credentials( - keypairs: &[Keypair], - withdrawal_credentials: &[Hash256], - genesis_time: u64, - eth1_block_hash: Hash256, - execution_payload_header: Option>, - spec: &ChainSpec, -) -> Result, String> { - if keypairs.len() != withdrawal_credentials.len() { - return Err(format!( - "wrong 
number of withdrawal credentials, expected: {}, got: {}", - keypairs.len(), - withdrawal_credentials.len() - )); - } - - let eth1_timestamp = 2_u64.pow(40); - let amount = spec.max_effective_balance; - - let datas = keypairs - .into_par_iter() - .zip(withdrawal_credentials.into_par_iter()) - .map(|(keypair, &withdrawal_credentials)| { - let mut data = DepositData { - withdrawal_credentials, - pubkey: keypair.pk.clone().into(), - amount, - signature: Signature::empty().into(), - }; - - data.signature = data.create_signature(&keypair.sk, spec); - - data - }) - .collect::>(); - - let mut state = initialize_beacon_state_from_eth1( - eth1_block_hash, - eth1_timestamp, - genesis_deposits(datas, spec)?, - execution_payload_header, - spec, - ) - .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; - - *state.genesis_time_mut() = genesis_time; - - // Invalidate all the caches after all the manual state surgery. - state - .drop_all_caches() - .map_err(|e| format!("Unable to drop caches: {:?}", e))?; - - Ok(state) + InteropGenesisBuilder::new() + .set_alternating_eth1_withdrawal_credentials() + .set_opt_execution_payload_header(execution_payload_header) + .build_genesis_state(keypairs, genesis_time, eth1_block_hash, spec) } #[cfg(test)] diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 3fb053bf88..1fba64aafb 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -7,6 +7,6 @@ pub use eth1::Eth1Endpoint; pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use interop::{ bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1, - interop_genesis_state_with_withdrawal_credentials, DEFAULT_ETH1_BLOCK_HASH, + InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH, }; pub use types::test_utils::generate_deterministic_keypairs; diff --git a/beacon_node/http_api/src/aggregate_attestation.rs b/beacon_node/http_api/src/aggregate_attestation.rs new file mode 100644 index 
0000000000..94b6acd2e6 --- /dev/null +++ b/beacon_node/http_api/src/aggregate_attestation.rs @@ -0,0 +1,65 @@ +use crate::api_types::GenericResponse; +use crate::unsupported_version_rejection; +use crate::version::{add_consensus_version_header, V1, V2}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{self, EndpointVersion, Hash256, Slot}; +use std::sync::Arc; +use types::fork_versioned_response::EmptyMetadata; +use types::{CommitteeIndex, ForkVersionedResponse}; +use warp::{ + hyper::{Body, Response}, + reply::Reply, +}; + +pub fn get_aggregate_attestation( + slot: Slot, + attestation_data_root: &Hash256, + committee_index: Option, + endpoint_version: EndpointVersion, + chain: Arc>, +) -> Result, warp::reject::Rejection> { + if endpoint_version == V2 { + let Some(committee_index) = committee_index else { + return Err(warp_utils::reject::custom_bad_request( + "missing committee index".to_string(), + )); + }; + let aggregate_attestation = chain + .get_aggregated_attestation_electra(slot, attestation_data_root, committee_index) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch aggregate: {:?}", + e + )) + })? + .ok_or_else(|| { + warp_utils::reject::custom_not_found("no matching aggregate found".to_string()) + })?; + let fork_name = chain.spec.fork_name_at_slot::(slot); + let fork_versioned_response = ForkVersionedResponse { + version: Some(fork_name), + metadata: EmptyMetadata {}, + data: aggregate_attestation, + }; + Ok(add_consensus_version_header( + warp::reply::json(&fork_versioned_response).into_response(), + fork_name, + )) + } else if endpoint_version == V1 { + let aggregate_attestation = chain + .get_pre_electra_aggregated_attestation_by_slot_and_root(slot, attestation_data_root) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch aggregate: {:?}", + e + )) + })? 
+ .map(GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found("no matching aggregate found".to_string()) + })?; + Ok(warp::reply::json(&aggregate_attestation).into_response()) + } else { + return Err(unsupported_version_rejection(endpoint_version)); + } +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 21a1cd48ba..7f9000b2bd 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -5,6 +5,7 @@ //! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are //! used for development. +mod aggregate_attestation; mod attestation_performance; mod attester_duties; mod block_id; @@ -30,7 +31,6 @@ mod validator; mod validator_inclusion; mod validators; mod version; - use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::fork_versioned_response; @@ -52,6 +52,7 @@ use eth2::types::{ }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; +use lighthouse_network::rpc::methods::MetaData; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use logging::SSELoggingComponents; @@ -62,6 +63,7 @@ pub use publish_blocks::{ publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock, }; use serde::{Deserialize, Serialize}; +use serde_json::Value; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; @@ -84,11 +86,11 @@ use tokio_stream::{ }; use types::{ fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, - AttesterSlashing, BeaconStateError, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, - ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, 
RelativeEpoch, - SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, - SingleAttestation, Slot, SyncCommitteeMessage, SyncContributionData, + AttesterSlashing, BeaconStateError, ChainSpec, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, + ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, + RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -169,7 +171,7 @@ impl Default for Config { sse_capacity_multiplier: 1, enable_beacon_processor: true, duplicate_block_status_code: StatusCode::ACCEPTED, - enable_light_client_server: false, + enable_light_client_server: true, target_peers: 100, } } @@ -1277,6 +1279,9 @@ pub fn serve( let consensus_version_header_filter = warp::header::header::(CONSENSUS_VERSION_HEADER); + let optional_consensus_version_header_filter = + warp::header::optional::(CONSENSUS_VERSION_HEADER); + // POST beacon/blocks let post_beacon_blocks = eth_v1 .and(warp::path("beacon")) @@ -1827,20 +1832,19 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()); - let beacon_pool_path_any = any_version - .and(warp::path("beacon")) - .and(warp::path("pool")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - let beacon_pool_path_v2 = eth_v2 .and(warp::path("beacon")) .and(warp::path("pool")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()); - // POST beacon/pool/attestations - let post_beacon_pool_attestations = beacon_pool_path + let beacon_pool_path_any = any_version + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + + let post_beacon_pool_attestations_v1 = 
beacon_pool_path .clone() .and(warp::path("attestations")) .and(warp::path::end()) @@ -1849,9 +1853,6 @@ pub fn serve( .and(reprocess_send_filter.clone()) .and(log_filter.clone()) .then( - // V1 and V2 are identical except V2 has a consensus version header in the request. - // We only require this header for SSZ deserialization, which isn't supported for - // this endpoint presently. |task_spawner: TaskSpawner, chain: Arc>, attestations: Vec>, @@ -1877,18 +1878,40 @@ pub fn serve( .clone() .and(warp::path("attestations")) .and(warp::path::end()) - .and(warp_utils::json::json()) + .and(warp_utils::json::json::()) + .and(optional_consensus_version_header_filter) .and(network_tx_filter.clone()) - .and(reprocess_send_filter) + .and(reprocess_send_filter.clone()) .and(log_filter.clone()) .then( |task_spawner: TaskSpawner, chain: Arc>, - attestations: Vec, + payload: Value, + fork_name: Option, network_tx: UnboundedSender>, reprocess_tx: Option>, log: Logger| async move { - let attestations = attestations.into_iter().map(Either::Right).collect(); + let attestations = + match crate::publish_attestations::deserialize_attestation_payload::( + payload, fork_name, &log, + ) { + Ok(attestations) => attestations, + Err(err) => { + warn!( + log, + "Unable to deserialize attestation POST request"; + "error" => ?err + ); + return warp::reply::with_status( + warp::reply::json( + &"Unable to deserialize request body".to_string(), + ), + eth2::StatusCode::BAD_REQUEST, + ) + .into_response(); + } + }; + let result = crate::publish_attestations::publish_attestations( task_spawner, chain, @@ -2898,36 +2921,24 @@ pub fn serve( .and(warp::path::end()) .and(task_spawner_filter.clone()) .and(network_globals.clone()) + .and(chain_filter.clone()) .then( |task_spawner: TaskSpawner, - network_globals: Arc>| { + network_globals: Arc>, + chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let enr = network_globals.local_enr(); let p2p_addresses = enr.multiaddr_p2p_tcp(); let 
discovery_addresses = enr.multiaddr_p2p_udp(); - let meta_data = network_globals.local_metadata.read(); Ok(api_types::GenericResponse::from(api_types::IdentityData { peer_id: network_globals.local_peer_id().to_base58(), enr, p2p_addresses, discovery_addresses, - metadata: api_types::MetaData { - seq_number: *meta_data.seq_number(), - attnets: format!( - "0x{}", - hex::encode(meta_data.attnets().clone().into_bytes()), - ), - syncnets: format!( - "0x{}", - hex::encode( - meta_data - .syncnets() - .cloned() - .unwrap_or_default() - .into_bytes() - ) - ), - }, + metadata: from_meta_data::( + &network_globals.local_metadata, + &chain.spec, + ), })) }) }, @@ -3372,40 +3383,15 @@ pub fn serve( not_synced_filter: Result<(), Rejection>, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { + task_spawner.blocking_response_task(Priority::P0, move || { not_synced_filter?; - let res = if endpoint_version == V2 { - let Some(committee_index) = query.committee_index else { - return Err(warp_utils::reject::custom_bad_request( - "missing committee index".to_string(), - )); - }; - chain.get_aggregated_attestation_electra( - query.slot, - &query.attestation_data_root, - committee_index, - ) - } else if endpoint_version == V1 { - // Do nothing - chain.get_pre_electra_aggregated_attestation_by_slot_and_root( - query.slot, - &query.attestation_data_root, - ) - } else { - return Err(unsupported_version_rejection(endpoint_version)); - }; - res.map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "unable to fetch aggregate: {:?}", - e - )) - })? 
- .map(api_types::GenericResponse::from) - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "no matching aggregate found".to_string(), - ) - }) + crate::aggregate_attestation::get_aggregate_attestation( + query.slot, + &query.attestation_data_root, + query.committee_index, + endpoint_version, + chain, + ) }) }, ); @@ -4779,7 +4765,7 @@ pub fn serve( .uor(post_beacon_blinded_blocks) .uor(post_beacon_blocks_v2) .uor(post_beacon_blinded_blocks_v2) - .uor(post_beacon_pool_attestations) + .uor(post_beacon_pool_attestations_v1) .uor(post_beacon_pool_attestations_v2) .uor(post_beacon_pool_attester_slashings) .uor(post_beacon_pool_proposer_slashings) @@ -4848,6 +4834,39 @@ pub fn serve( Ok(http_server) } +fn from_meta_data( + meta_data: &RwLock>, + spec: &ChainSpec, +) -> api_types::MetaData { + let meta_data = meta_data.read(); + let format_hex = |bytes: &[u8]| format!("0x{}", hex::encode(bytes)); + + let seq_number = *meta_data.seq_number(); + let attnets = format_hex(&meta_data.attnets().clone().into_bytes()); + let syncnets = format_hex( + &meta_data + .syncnets() + .cloned() + .unwrap_or_default() + .into_bytes(), + ); + + if spec.is_peer_das_scheduled() { + api_types::MetaData::V3(api_types::MetaDataV3 { + seq_number, + attnets, + syncnets, + custody_group_count: meta_data.custody_group_count().cloned().unwrap_or_default(), + }) + } else { + api_types::MetaData::V2(api_types::MetaDataV2 { + seq_number, + attnets, + syncnets, + }) + } +} + /// Publish a message to the libp2p pubsub network. fn publish_pubsub_message( network_tx: &UnboundedSender>, diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index 111dee3cff..10d13e09a5 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -36,14 +36,15 @@ //! attestations and there's no immediate cause for concern. 
use crate::task_spawner::{Priority, TaskSpawner}; use beacon_chain::{ - validator_monitor::timestamp_now, AttestationError, BeaconChain, BeaconChainError, - BeaconChainTypes, + single_attestation::single_attestation_to_attestation, validator_monitor::timestamp_now, + AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes, }; use beacon_processor::work_reprocessing_queue::{QueuedUnaggregate, ReprocessQueueMessage}; use either::Either; use eth2::types::Failure; use lighthouse_network::PubsubMessage; use network::NetworkMessage; +use serde_json::Value; use slog::{debug, error, warn, Logger}; use std::borrow::Cow; use std::sync::Arc; @@ -52,11 +53,11 @@ use tokio::sync::{ mpsc::{Sender, UnboundedSender}, oneshot, }; -use types::{Attestation, EthSpec, SingleAttestation}; +use types::{Attestation, EthSpec, ForkName, SingleAttestation}; // Error variants are only used in `Debug` and considered `dead_code` by the compiler. #[derive(Debug)] -enum Error { +pub enum Error { Validation(AttestationError), Publication, ForkChoice(#[allow(dead_code)] BeaconChainError), @@ -64,6 +65,7 @@ enum Error { ReprocessDisabled, ReprocessFull, ReprocessTimeout, + InvalidJson(#[allow(dead_code)] serde_json::Error), FailedConversion(#[allow(dead_code)] BeaconChainError), } @@ -74,6 +76,36 @@ enum PublishAttestationResult { Failure(Error), } +#[allow(clippy::type_complexity)] +pub fn deserialize_attestation_payload( + payload: Value, + fork_name: Option, + log: &Logger, +) -> Result, SingleAttestation>>, Error> { + if fork_name.is_some_and(|fork_name| fork_name.electra_enabled()) || fork_name.is_none() { + if fork_name.is_none() { + warn!( + log, + "No Consensus Version header specified."; + ); + } + + Ok(serde_json::from_value::>(payload) + .map_err(Error::InvalidJson)? + .into_iter() + .map(Either::Right) + .collect()) + } else { + Ok( + serde_json::from_value::>>(payload) + .map_err(Error::InvalidJson)? 
+ .into_iter() + .map(Either::Left) + .collect(), + ) + } +} + fn verify_and_publish_attestation( chain: &Arc>, either_attestation: &Either, SingleAttestation>, @@ -151,10 +183,10 @@ fn convert_to_attestation<'a, T: BeaconChainTypes>( chain: &Arc>, attestation: &'a Either, SingleAttestation>, ) -> Result>, Error> { - let a = match attestation { - Either::Left(a) => Cow::Borrowed(a), - Either::Right(single_attestation) => chain - .with_committee_cache( + match attestation { + Either::Left(a) => Ok(Cow::Borrowed(a)), + Either::Right(single_attestation) => { + let conversion_result = chain.with_committee_cache( single_attestation.data.target.root, single_attestation .data @@ -163,26 +195,35 @@ fn convert_to_attestation<'a, T: BeaconChainTypes>( |committee_cache, _| { let Some(committee) = committee_cache.get_beacon_committee( single_attestation.data.slot, - single_attestation.committee_index as u64, + single_attestation.committee_index, ) else { - return Err(BeaconChainError::AttestationError( - types::AttestationError::NoCommitteeForSlotAndIndex { - slot: single_attestation.data.slot, - index: single_attestation.committee_index as u64, - }, - )); + return Ok(Err(AttestationError::NoCommitteeForSlotAndIndex { + slot: single_attestation.data.slot, + index: single_attestation.committee_index, + })); }; - let attestation = - single_attestation.to_attestation::(committee.committee)?; - - Ok(Cow::Owned(attestation)) + Ok(single_attestation_to_attestation::( + single_attestation, + committee.committee, + ) + .map(Cow::Owned)) }, - ) - .map_err(Error::FailedConversion)?, - }; - - Ok(a) + ); + match conversion_result { + Ok(Ok(attestation)) => Ok(attestation), + Ok(Err(e)) => Err(Error::Validation(e)), + // Map the error returned by `with_committee_cache` for unknown blocks into the + // `UnknownHeadBlock` error that is gracefully handled. 
+ Err(BeaconChainError::MissingBeaconBlock(beacon_block_root)) => { + Err(Error::Validation(AttestationError::UnknownHeadBlock { + beacon_block_root, + })) + } + Err(e) => Err(Error::FailedConversion(e)), + } + } + } } pub async fn publish_attestations( @@ -199,7 +240,7 @@ pub async fn publish_attestations( .iter() .map(|att| match att { Either::Left(att) => (att.data().slot, att.committee_index()), - Either::Right(att) => (att.data.slot, Some(att.committee_index as u64)), + Either::Right(att) => (att.data.slot, Some(att.committee_index)), }) .collect::>(); diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 7b48d64e36..fbc92a45cc 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -8,6 +8,7 @@ use beacon_processor::{ }; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; +use lighthouse_network::rpc::methods::MetaDataV3; use lighthouse_network::{ discv5::enr::CombinedKey, libp2p::swarm::{ @@ -150,11 +151,21 @@ pub async fn create_api_server_with_config( let (network_senders, network_receivers) = NetworkSenders::new(); // Default metadata - let meta_data = MetaData::V2(MetaDataV2 { - seq_number: SEQ_NUMBER, - attnets: EnrAttestationBitfield::::default(), - syncnets: EnrSyncCommitteeBitfield::::default(), - }); + let meta_data = if chain.spec.is_peer_das_scheduled() { + MetaData::V3(MetaDataV3 { + seq_number: SEQ_NUMBER, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + custody_group_count: chain.spec.custody_requirement, + }) + } else { + MetaData::V2(MetaDataV2 { + seq_number: SEQ_NUMBER, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }) + }; + let enr_key = CombinedKey::generate_secp256k1(); let enr = Enr::builder().build(&enr_key).unwrap(); let network_config = Arc::new(NetworkConfig::default()); diff --git 
a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index db4ef00257..1baa71699c 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,4 +1,3 @@ -use beacon_chain::blob_verification::GossipVerifiedBlob; use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, GossipVerifiedBlock, IntoGossipVerifiedBlock, @@ -7,9 +6,10 @@ use eth2::reqwest::StatusCode; use eth2::types::{BroadcastValidation, PublishBlockRequest}; use http_api::test_utils::InteractiveTester; use http_api::{publish_blinded_block, publish_block, reconstruct_block, Config, ProvenancedBlock}; +use std::collections::HashSet; use std::sync::Arc; use types::{ - BlobSidecar, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot, + ColumnIndex, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot, }; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -17,6 +17,8 @@ use warp_utils::reject::CustomBadRequest; type E = MainnetEthSpec; /* + * TODO(fulu): write PeerDAS equivalent tests for these. + * * We have the following test cases, which are duplicated for the blinded variant of the route: * * - `broadcast_validation=gossip` @@ -1375,7 +1377,7 @@ pub async fn block_seen_on_gossip_without_blobs() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. @@ -1437,7 +1439,7 @@ pub async fn block_seen_on_gossip_with_some_blobs() { // `validator_count // 32`. 
let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. @@ -1464,8 +1466,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() { blobs.0.len() ); - let partial_kzg_proofs = vec![*blobs.0.first().unwrap()]; - let partial_blobs = vec![blobs.1.first().unwrap().clone()]; + let partial_kzg_proofs = [*blobs.0.first().unwrap()]; + let partial_blobs = [blobs.1.first().unwrap().clone()]; // Simulate the block being seen on gossip. block @@ -1474,21 +1476,15 @@ pub async fn block_seen_on_gossip_with_some_blobs() { .unwrap(); // Simulate some of the blobs being seen on gossip. - for (i, (kzg_proof, blob)) in partial_kzg_proofs - .into_iter() - .zip(partial_blobs) - .enumerate() - { - let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); - let gossip_blob = - GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); - tester - .harness - .chain - .process_gossip_blob(gossip_blob) - .await - .unwrap(); - } + tester + .harness + .process_gossip_blobs_or_columns( + &block, + partial_blobs.iter(), + partial_kzg_proofs.iter(), + Some(get_custody_columns(&tester)), + ) + .await; // It should not yet be added to fork choice because all blobs have not been seen. assert!(!tester @@ -1523,7 +1519,7 @@ pub async fn blobs_seen_on_gossip_without_block() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. 
@@ -1546,22 +1542,15 @@ pub async fn blobs_seen_on_gossip_without_block() { let (kzg_proofs, blobs) = blobs.expect("should have some blobs"); // Simulate the blobs being seen on gossip. - for (i, (kzg_proof, blob)) in kzg_proofs - .clone() - .into_iter() - .zip(blobs.clone()) - .enumerate() - { - let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); - let gossip_blob = - GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); - tester - .harness - .chain - .process_gossip_blob(gossip_blob) - .await - .unwrap(); - } + tester + .harness + .process_gossip_blobs_or_columns( + &block, + blobs.iter(), + kzg_proofs.iter(), + Some(get_custody_columns(&tester)), + ) + .await; // It should not yet be added to fork choice because the block has not been seen. assert!(!tester @@ -1596,7 +1585,7 @@ pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. @@ -1620,22 +1609,15 @@ pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { assert!(!blobs.is_empty()); // Simulate the blobs being seen on gossip. 
- for (i, (kzg_proof, blob)) in kzg_proofs - .clone() - .into_iter() - .zip(blobs.clone()) - .enumerate() - { - let sidecar = Arc::new(BlobSidecar::new(i, blob, &block, kzg_proof).unwrap()); - let gossip_blob = - GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); - tester - .harness - .chain - .process_gossip_blob(gossip_blob) - .await - .unwrap(); - } + tester + .harness + .process_gossip_blobs_or_columns( + &block, + blobs.iter(), + kzg_proofs.iter(), + Some(get_custody_columns(&tester)), + ) + .await; // It should not yet be added to fork choice because the block has not been seen. assert!(!tester @@ -1672,7 +1654,7 @@ pub async fn slashable_blobs_seen_on_gossip_cause_failure() { // `validator_count // 32`. let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new(Some(spec), validator_count).await; // Create some chain depth. @@ -1697,17 +1679,15 @@ pub async fn slashable_blobs_seen_on_gossip_cause_failure() { let (kzg_proofs_b, blobs_b) = blobs_b.expect("should have some blobs"); // Simulate the blobs of block B being seen on gossip. - for (i, (kzg_proof, blob)) in kzg_proofs_b.into_iter().zip(blobs_b).enumerate() { - let sidecar = Arc::new(BlobSidecar::new(i, blob, &block_b, kzg_proof).unwrap()); - let gossip_blob = - GossipVerifiedBlob::new(sidecar, i as u64, &tester.harness.chain).unwrap(); - tester - .harness - .chain - .process_gossip_blob(gossip_blob) - .await - .unwrap(); - } + tester + .harness + .process_gossip_blobs_or_columns( + &block_b, + blobs_b.iter(), + kzg_proofs_b.iter(), + Some(get_custody_columns(&tester)), + ) + .await; // It should not yet be added to fork choice because block B has not been seen. assert!(!tester @@ -1742,7 +1722,7 @@ pub async fn duplicate_block_status_code() { // `validator_count // 32`. 
let validator_count = 64; let num_initial: u64 = 31; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + let spec = ForkName::latest_stable().make_genesis_spec(E::default_spec()); let duplicate_block_status_code = StatusCode::IM_A_TEAPOT; let tester = InteractiveTester::::new_with_initializer_and_mutator( Some(spec), @@ -1804,3 +1784,13 @@ fn assert_server_message_error(error_response: eth2::Error, expected_message: St }; assert_eq!(err.message, expected_message); } + +fn get_custody_columns(tester: &InteractiveTester) -> HashSet { + tester + .ctx + .network_globals + .as_ref() + .unwrap() + .sampling_columns + .clone() +} diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index d6b8df33b3..10e1d01536 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -5,7 +5,7 @@ use beacon_chain::{ }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use execution_layer::test_utils::generate_genesis_header; -use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use genesis::{bls_withdrawal_credentials, InteropGenesisBuilder}; use http_api::test_utils::*; use std::collections::HashSet; use types::{ @@ -346,35 +346,46 @@ fn assert_server_indexed_error(error: eth2::Error, status_code: u16, indices: Ve #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn bls_to_execution_changes_update_all_around_capella_fork() { - let validator_count = 128; + const VALIDATOR_COUNT: usize = 128; let fork_epoch = Epoch::new(2); let spec = capella_spec(fork_epoch); let max_bls_to_execution_changes = E::max_bls_to_execution_changes(); // Use a genesis state with entirely BLS withdrawal credentials. - // Offset keypairs by `validator_count` to create keys distinct from the signing keys. 
- let validator_keypairs = generate_deterministic_keypairs(validator_count); - let withdrawal_keypairs = (0..validator_count) - .map(|i| Some(generate_deterministic_keypair(i + validator_count))) - .collect::>(); - let withdrawal_credentials = withdrawal_keypairs - .iter() - .map(|keypair| bls_withdrawal_credentials(&keypair.as_ref().unwrap().pk, &spec)) + // Offset keypairs by `VALIDATOR_COUNT` to create keys distinct from the signing keys. + let validator_keypairs = generate_deterministic_keypairs(VALIDATOR_COUNT); + let withdrawal_keypairs = (0..VALIDATOR_COUNT) + .map(|i| Some(generate_deterministic_keypair(i + VALIDATOR_COUNT))) .collect::>(); + + fn withdrawal_credentials_fn<'a>( + index: usize, + _: &'a types::PublicKey, + spec: &'a ChainSpec, + ) -> Hash256 { + // It is a bit inefficient to regenerate the whole keypair here, but this is a workaround. + // `InteropGenesisBuilder` requires the `withdrawal_credentials_fn` to have + // a `'static` lifetime. + let keypair = generate_deterministic_keypair(index + VALIDATOR_COUNT); + bls_withdrawal_credentials(&keypair.pk, spec) + } + let header = generate_genesis_header(&spec, true); - let genesis_state = interop_genesis_state_with_withdrawal_credentials( - &validator_keypairs, - &withdrawal_credentials, - HARNESS_GENESIS_TIME, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - header, - &spec, - ) - .unwrap(); + + let genesis_state = InteropGenesisBuilder::new() + .set_opt_execution_payload_header(header) + .set_withdrawal_credentials_fn(Box::new(withdrawal_credentials_fn)) + .build_genesis_state( + &validator_keypairs, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + &spec, + ) + .unwrap(); let tester = InteractiveTester::::new_with_initializer_and_mutator( Some(spec.clone()), - validator_count, + VALIDATOR_COUNT, Some(Box::new(|harness_builder| { harness_builder .keypairs(validator_keypairs) @@ -421,7 +432,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { let 
pubkey = &harness.get_withdrawal_keypair(validator_index).pk; // And the wrong secret key. let secret_key = &harness - .get_withdrawal_keypair((validator_index + 1) % validator_count as u64) + .get_withdrawal_keypair((validator_index + 1) % VALIDATOR_COUNT as u64) .sk; harness.make_bls_to_execution_change_with_keys( validator_index, @@ -433,7 +444,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { .collect::>(); // Submit some changes before Capella. Just enough to fill two blocks. - let num_pre_capella = validator_count / 4; + let num_pre_capella = VALIDATOR_COUNT / 4; let blocks_filled_pre_capella = 2; assert_eq!( num_pre_capella, @@ -488,7 +499,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { ); // Add Capella blocks which should be full of BLS to execution changes. - for i in 0..validator_count / max_bls_to_execution_changes { + for i in 0..VALIDATOR_COUNT / max_bls_to_execution_changes { let head_block_root = harness.extend_slots(1).await; let head_block = harness .chain @@ -534,7 +545,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { assert_server_indexed_error( error, 400, - (validator_count..3 * validator_count).collect(), + (VALIDATOR_COUNT..3 * VALIDATOR_COUNT).collect(), ); } } diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 60a4c50783..bb3086945b 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -5,6 +5,7 @@ use beacon_chain::{ ChainConfig, }; use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage; +use either::Either; use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; @@ -906,9 +907,11 @@ async fn queue_attestations_from_http() { .flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att)) .collect::>(); + let 
attestations = Either::Right(single_attestations); + tokio::spawn(async move { client - .post_beacon_pool_attestations_v2(&single_attestations, fork_name) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .expect("attestations should be processed successfully") }) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index bc5173c2ab..36410a2581 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3,6 +3,7 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, }; +use either::Either; use eth2::{ mixin::{RequestAccept, ResponseForkName, ResponseOptional}, reqwest::RequestBuilder, @@ -1811,12 +1812,25 @@ impl ApiTester { self } - pub async fn test_post_beacon_pool_attestations_valid_v1(mut self) -> Self { + pub async fn test_post_beacon_pool_attestations_valid(mut self) -> Self { self.client .post_beacon_pool_attestations_v1(self.attestations.as_slice()) .await .unwrap(); + let fork_name = self + .attestations + .first() + .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) + .unwrap(); + + let attestations = Either::Left(self.attestations.clone()); + + self.client + .post_beacon_pool_attestations_v2::(attestations, fork_name) + .await + .unwrap(); + assert!( self.network_rx.network_recv.recv().await.is_some(), "valid attestation should be sent to network" @@ -1834,8 +1848,10 @@ impl ApiTester { .first() .map(|att| self.chain.spec.fork_name_at_slot::(att.data.slot)) .unwrap(); + + let attestations = Either::Right(self.single_attestations.clone()); self.client - .post_beacon_pool_attestations_v2(self.single_attestations.as_slice(), fork_name) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap(); assert!( @@ -1901,10 +1917,10 @@ impl ApiTester { .first() .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) 
.unwrap(); - + let attestations = Either::Right(attestations); let err_v2 = self .client - .post_beacon_pool_attestations_v2(attestations.as_slice(), fork_name) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap_err(); @@ -1934,7 +1950,7 @@ impl ApiTester { .sync_committee_period(&self.chain.spec) .unwrap(); - let result = match self + match self .client .get_beacon_light_client_updates::(current_sync_committee_period, 1) .await @@ -1955,7 +1971,6 @@ impl ApiTester { .unwrap(); assert_eq!(1, expected.len()); - assert_eq!(result.clone().unwrap().len(), expected.len()); self } @@ -1980,7 +1995,6 @@ impl ApiTester { .get_light_client_bootstrap(&self.chain.store, &block_root, 1u64, &self.chain.spec); assert!(expected.is_ok()); - assert_eq!(result.unwrap().data, expected.unwrap().unwrap().0); self @@ -2363,11 +2377,11 @@ impl ApiTester { enr: self.local_enr.clone(), p2p_addresses: self.local_enr.multiaddr_p2p_tcp(), discovery_addresses: self.local_enr.multiaddr_p2p_udp(), - metadata: eth2::types::MetaData { + metadata: MetaData::V2(MetaDataV2 { seq_number: 0, attnets: "0x0000000000000000".to_string(), syncnets: "0x00".to_string(), - }, + }), }; assert_eq!(result, expected); @@ -6063,9 +6077,9 @@ impl ApiTester { .chain .spec .fork_name_at_slot::(self.chain.slot().unwrap()); - + let attestations = Either::Right(self.single_attestations.clone()); self.client - .post_beacon_pool_attestations_v2(&self.single_attestations, fork_name) + .post_beacon_pool_attestations_v2::(attestations, fork_name) .await .unwrap(); @@ -6384,10 +6398,10 @@ async fn post_beacon_blocks_duplicate() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_pools_post_attestations_valid_v1() { +async fn beacon_pools_post_attestations_valid() { ApiTester::new() .await - .test_post_beacon_pool_attestations_valid_v1() + .test_post_beacon_pool_attestations_valid() .await; } diff --git a/beacon_node/lighthouse_network/Cargo.toml 
b/beacon_node/lighthouse_network/Cargo.toml index 959398fb2d..bccf9b9f29 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -21,8 +21,9 @@ futures = { workspace = true } gossipsub = { workspace = true } hex = { workspace = true } itertools = { workspace = true } -libp2p-mplex = "0.42" +libp2p-mplex = "0.43" lighthouse_version = { workspace = true } +local-ip-address = "0.6" lru = { workspace = true } lru_cache = { workspace = true } metrics = { workspace = true } @@ -51,7 +52,7 @@ unused_port = { workspace = true } void = "1.0.2" [dependencies.libp2p] -version = "0.54" +version = "0.55" default-features = false features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index 61f5730c08..239caae47a 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -26,7 +26,7 @@ futures-timer = "3.0.2" getrandom = "0.2.12" hashlink = { workspace = true } hex_fmt = "0.3.0" -libp2p = { version = "0.54", default-features = false } +libp2p = { version = "0.55", default-features = false } prometheus-client = "0.22.0" quick-protobuf = "0.8" quick-protobuf-codec = "0.3" diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index 6528e737a3..7eb35cc49b 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -1841,6 +1841,30 @@ where peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); } self.mcache.observe_duplicate(&msg_id, propagation_source); + // track metrics for the source of the duplicates + if let Some(metrics) = self.metrics.as_mut() { + if self + .mesh + .get(&message.topic) + 
.is_some_and(|peers| peers.contains(propagation_source)) + { + // duplicate was received from a mesh peer + metrics.mesh_duplicates(&message.topic); + } else if self + .gossip_promises + .contains_peer(&msg_id, propagation_source) + { + // duplicate was received from an iwant request + metrics.iwant_duplicates(&message.topic); + } else { + tracing::warn!( + messsage=%msg_id, + peer=%propagation_source, + topic=%message.topic, + "Peer should not have sent message" + ); + } + } return; } diff --git a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs index 3f72709245..ce1dee2a72 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/gossip_promises.rs @@ -41,6 +41,13 @@ impl GossipPromises { self.promises.contains_key(message) } + /// Returns true if the message id exists in the promises and contains the given peer. + pub(crate) fn contains_peer(&self, message: &MessageId, peer: &PeerId) -> bool { + self.promises + .get(message) + .is_some_and(|peers| peers.contains_key(peer)) + } + ///Get the peers we sent IWANT the input message id. pub(crate) fn peers_for_message(&self, message_id: &MessageId) -> Vec { self.promises diff --git a/beacon_node/lighthouse_network/gossipsub/src/handler.rs b/beacon_node/lighthouse_network/gossipsub/src/handler.rs index d89013eb2f..0f25db6e3d 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/handler.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/handler.rs @@ -194,7 +194,6 @@ impl EnabledHandler { &mut self, FullyNegotiatedOutbound { protocol, .. 
}: FullyNegotiatedOutbound< ::OutboundProtocol, - ::OutboundOpenInfo, >, ) { let (substream, peer_kind) = protocol; @@ -217,7 +216,7 @@ impl EnabledHandler { ) -> Poll< ConnectionHandlerEvent< ::OutboundProtocol, - ::OutboundOpenInfo, + (), ::ToBehaviour, >, > { @@ -423,7 +422,7 @@ impl ConnectionHandler for Handler { type OutboundOpenInfo = (); type OutboundProtocol = ProtocolConfig; - fn listen_protocol(&self) -> SubstreamProtocol { + fn listen_protocol(&self) -> SubstreamProtocol { match self { Handler::Enabled(handler) => { SubstreamProtocol::new(either::Either::Left(handler.listen_protocol.clone()), ()) @@ -458,9 +457,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { match self { Handler::Enabled(handler) => handler.poll(cx), Handler::Disabled(DisabledHandler::ProtocolUnsupported { peer_kind_sent }) => { @@ -479,12 +476,7 @@ impl ConnectionHandler for Handler { fn on_connection_event( &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, + event: ConnectionEvent, ) { match self { Handler::Enabled(handler) => { @@ -521,7 +513,7 @@ impl ConnectionHandler for Handler { }) => match protocol { Either::Left(protocol) => handler.on_fully_negotiated_inbound(protocol), #[allow(unreachable_patterns)] - Either::Right(v) => void::unreachable(v), + Either::Right(v) => libp2p::core::util::unreachable(v), }, ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { handler.on_fully_negotiated_outbound(fully_negotiated_outbound) diff --git a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs index d3ca6c299e..2989f95a26 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/metrics.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/metrics.rs @@ -194,6 +194,12 @@ pub(crate) struct Metrics { /// Number of full 
messages we received that we previously sent a IDONTWANT for. idontwant_messages_ignored_per_topic: Family, + /// Count of duplicate messages we have received from mesh peers for a given topic. + mesh_duplicates: Family, + + /// Count of duplicate messages we have received from by requesting them over iwant for a given topic. + iwant_duplicates: Family, + /// The size of the priority queue. priority_queue_size: Histogram, /// The size of the non-priority queue. @@ -359,6 +365,16 @@ impl Metrics { "IDONTWANT messages that were sent but we received the full message regardless" ); + let mesh_duplicates = register_family!( + "mesh_duplicates_per_topic", + "Count of duplicate messages received from mesh peers per topic" + ); + + let iwant_duplicates = register_family!( + "iwant_duplicates_per_topic", + "Count of duplicate messages received from non-mesh peers that we sent iwants for" + ); + let idontwant_bytes = { let metric = Counter::default(); registry.register( @@ -425,6 +441,8 @@ impl Metrics { idontwant_msgs_ids, idontwant_messages_sent_per_topic, idontwant_messages_ignored_per_topic, + mesh_duplicates, + iwant_duplicates, priority_queue_size, non_priority_queue_size, } @@ -597,6 +615,20 @@ impl Metrics { } } + /// Register a duplicate message received from a mesh peer. + pub(crate) fn mesh_duplicates(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.mesh_duplicates.get_or_create(topic).inc(); + } + } + + /// Register a duplicate message received from a non-mesh peer on an iwant request. 
+ pub(crate) fn iwant_duplicates(&mut self, topic: &TopicHash) { + if self.register_topic(topic).is_ok() { + self.iwant_duplicates.get_or_create(topic).inc(); + } + } + pub(crate) fn register_msg_validation( &mut self, topic: &TopicHash, diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 55c1dbf491..5a6628439e 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -6,6 +6,7 @@ use directory::{ DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, }; use libp2p::Multiaddr; +use local_ip_address::local_ipv6; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use std::net::{Ipv4Addr, Ipv6Addr}; @@ -266,6 +267,18 @@ impl Config { } } + /// A helper function to check if the local host has a globally routeable IPv6 address. If so, + /// returns true. + pub fn is_ipv6_supported() -> bool { + // If IPv6 is supported + let Ok(std::net::IpAddr::V6(local_ip)) = local_ipv6() else { + return false; + }; + + // If its globally routable, return true + is_global_ipv6(&local_ip) + } + pub fn listen_addrs(&self) -> &ListenAddress { &self.listen_addresses } @@ -354,7 +367,7 @@ impl Default for Config { topics: Vec::new(), proposer_only: false, metrics_enabled: false, - enable_light_client_server: false, + enable_light_client_server: true, outbound_rate_limiter_config: None, invalid_block_storage: None, inbound_rate_limiter_config: None, diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 062a119e0d..8067711954 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -339,9 +339,9 @@ mod test { type E = MainnetEthSpec; - fn make_eip7594_spec() -> ChainSpec { + fn make_fulu_spec() -> ChainSpec { let mut spec = E::default_spec(); - spec.eip7594_fork_epoch = Some(Epoch::new(10)); + 
spec.fulu_fork_epoch = Some(Epoch::new(10)); spec } @@ -359,7 +359,7 @@ mod test { subscribe_all_data_column_subnets: false, ..NetworkConfig::default() }; - let spec = make_eip7594_spec(); + let spec = make_fulu_spec(); let enr = build_enr_with_config(config, &spec).0; @@ -375,7 +375,7 @@ mod test { subscribe_all_data_column_subnets: true, ..NetworkConfig::default() }; - let spec = make_eip7594_spec(); + let spec = make_fulu_spec(); let enr = build_enr_with_config(config, &spec).0; assert_eq!( diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 578bb52b51..33c7775ae2 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -994,7 +994,7 @@ impl NetworkBehaviour for Discovery { &mut self, _peer_id: PeerId, _connection_id: ConnectionId, - _event: void::Void, + _event: std::convert::Infallible, ) { } diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 9fd059df85..abafb200be 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -37,7 +37,10 @@ impl NetworkBehaviour for PeerManager { // no events from the dummy handler } - fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> { // perform the heartbeat when necessary while self.heartbeat.poll_tick(cx).is_ready() { self.heartbeat(); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 8981a75aed..2bf35b0e35 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -485,17 +485,9 @@ fn context_bytes( RpcSuccessResponse::BlobsByRange(_) | RpcSuccessResponse::BlobsByRoot(_) => { return 
fork_context.to_context_bytes(ForkName::Deneb); } - RpcSuccessResponse::DataColumnsByRoot(d) - | RpcSuccessResponse::DataColumnsByRange(d) => { - // TODO(das): Remove deneb fork after `peerdas-devnet-2`. - return if matches!( - fork_context.spec.fork_name_at_slot::(d.slot()), - ForkName::Deneb - ) { - fork_context.to_context_bytes(ForkName::Deneb) - } else { - fork_context.to_context_bytes(ForkName::Electra) - }; + RpcSuccessResponse::DataColumnsByRoot(_) + | RpcSuccessResponse::DataColumnsByRange(_) => { + return fork_context.to_context_bytes(ForkName::Fulu); } RpcSuccessResponse::LightClientBootstrap(lc_bootstrap) => { return lc_bootstrap @@ -576,7 +568,7 @@ fn handle_rpc_request( BlocksByRootRequest::V2(BlocksByRootRequestV2 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, - spec.max_request_blocks as usize, + spec.max_request_blocks(current_fork), )?, }), ))), @@ -584,32 +576,18 @@ fn handle_rpc_request( BlocksByRootRequest::V1(BlocksByRootRequestV1 { block_roots: RuntimeVariableList::from_ssz_bytes( decoded_buffer, - spec.max_request_blocks as usize, + spec.max_request_blocks(current_fork), )?, }), ))), - SupportedProtocol::BlobsByRangeV1 => { - let req = BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?; - let max_requested_blobs = req - .count - .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork)); - // TODO(pawan): change this to max_blobs_per_rpc_request in the alpha10 PR - if max_requested_blobs > spec.max_request_blob_sidecars { - return Err(RPCError::ErrorResponse( - RpcErrorResponse::InvalidRequest, - format!( - "requested exceeded limit. 
allowed: {}, requested: {}", - spec.max_request_blob_sidecars, max_requested_blobs - ), - )); - } - Ok(Some(RequestType::BlobsByRange(req))) - } + SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange( + BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), SupportedProtocol::BlobsByRootV1 => { Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest { blob_ids: RuntimeVariableList::from_ssz_bytes( decoded_buffer, - spec.max_request_blob_sidecars as usize, + spec.max_request_blob_sidecars(current_fork), )?, }))) } @@ -744,10 +722,7 @@ fn handle_rpc_response( }, SupportedProtocol::DataColumnsByRootV1 => match fork_name { Some(fork_name) => { - // TODO(das): PeerDAS is currently supported for both deneb and electra. This check - // does not advertise the topic on deneb, simply allows it to decode it. Advertise - // logic is in `SupportedTopic::currently_supported`. - if fork_name.deneb_enabled() { + if fork_name.fulu_enabled() { Ok(Some(RpcSuccessResponse::DataColumnsByRoot(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) @@ -768,7 +743,7 @@ fn handle_rpc_response( }, SupportedProtocol::DataColumnsByRangeV1 => match fork_name { Some(fork_name) => { - if fork_name.deneb_enabled() { + if fork_name.fulu_enabled() { Ok(Some(RpcSuccessResponse::DataColumnsByRange(Arc::new( DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, )))) @@ -959,9 +934,10 @@ mod tests { use crate::rpc::protocol::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ - blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, - BeaconBlockBellatrix, DataColumnIdentifier, EmptyBlock, Epoch, FixedBytesExtended, - FullPayload, Signature, Slot, + blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, BeaconBlock, BeaconBlockAltair, + BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, DataColumnIdentifier, EmptyBlock, + Epoch, FixedBytesExtended, FullPayload, KzgCommitment, KzgProof, Signature, + 
SignedBeaconBlockHeader, Slot, }; type Spec = types::MainnetEthSpec; @@ -1012,7 +988,17 @@ mod tests { } fn empty_data_column_sidecar() -> Arc> { - Arc::new(DataColumnSidecar::empty()) + Arc::new(DataColumnSidecar { + index: 0, + column: VariableList::new(vec![Cell::::default()]).unwrap(), + kzg_commitments: VariableList::new(vec![KzgCommitment::empty_for_testing()]).unwrap(), + kzg_proofs: VariableList::new(vec![KzgProof::empty()]).unwrap(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader::empty(), + signature: Signature::empty(), + }, + kzg_commitments_inclusion_proof: Default::default(), + }) } /// Bellatrix block with length < max_rpc_size. @@ -1097,21 +1083,21 @@ mod tests { } } - fn bbroot_request_v1(spec: &ChainSpec) -> BlocksByRootRequest { - BlocksByRootRequest::new_v1(vec![Hash256::zero()], spec) + fn bbroot_request_v1(fork_name: ForkName) -> BlocksByRootRequest { + BlocksByRootRequest::new_v1(vec![Hash256::zero()], &fork_context(fork_name)) } - fn bbroot_request_v2(spec: &ChainSpec) -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![Hash256::zero()], spec) + fn bbroot_request_v2(fork_name: ForkName) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![Hash256::zero()], &fork_context(fork_name)) } - fn blbroot_request(spec: &ChainSpec) -> BlobsByRootRequest { + fn blbroot_request(fork_name: ForkName) -> BlobsByRootRequest { BlobsByRootRequest::new( vec![BlobIdentifier { block_root: Hash256::zero(), index: 0, }], - spec, + &fork_context(fork_name), ) } @@ -1909,7 +1895,8 @@ mod tests { #[test] fn test_encode_then_decode_request() { - let chain_spec = Spec::default_spec(); + let fork_context = fork_context(ForkName::Electra); + let chain_spec = fork_context.spec.clone(); let requests: &[RequestType] = &[ RequestType::Ping(ping_message()), @@ -1917,21 +1904,33 @@ mod tests { RequestType::Goodbye(GoodbyeReason::Fault), RequestType::BlocksByRange(bbrange_request_v1()), RequestType::BlocksByRange(bbrange_request_v2()), - 
RequestType::BlocksByRoot(bbroot_request_v1(&chain_spec)), - RequestType::BlocksByRoot(bbroot_request_v2(&chain_spec)), RequestType::MetaData(MetadataRequest::new_v1()), RequestType::BlobsByRange(blbrange_request()), - RequestType::BlobsByRoot(blbroot_request(&chain_spec)), RequestType::DataColumnsByRange(dcbrange_request()), RequestType::DataColumnsByRoot(dcbroot_request(&chain_spec)), RequestType::MetaData(MetadataRequest::new_v2()), ]; - for req in requests.iter() { for fork_name in ForkName::list_all() { encode_then_decode_request(req.clone(), fork_name, &chain_spec); } } + + // Request types that have different length limits depending on the fork + // Handled separately to have consistent `ForkName` across request and responses + let fork_dependent_requests = |fork_name| { + [ + RequestType::BlobsByRoot(blbroot_request(fork_name)), + RequestType::BlocksByRoot(bbroot_request_v1(fork_name)), + RequestType::BlocksByRoot(bbroot_request_v2(fork_name)), + ] + }; + for fork_name in ForkName::list_all() { + let requests = fork_dependent_requests(fork_name); + for req in requests { + encode_then_decode_request(req.clone(), fork_name, &chain_spec); + } + } } /// Test a malicious snappy encoding for a V1 `Status` message where the attacker diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 3a008df023..03203fcade 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -353,6 +353,7 @@ where !matches!(self.state, HandlerState::Deactivated) } + #[allow(deprecated)] fn poll( &mut self, cx: &mut Context<'_>, @@ -814,6 +815,7 @@ where Poll::Pending } + #[allow(deprecated)] fn on_connection_event( &mut self, event: ConnectionEvent< @@ -855,6 +857,45 @@ where } let (req, substream) = substream; + let current_fork = self.fork_context.current_fork(); + let spec = &self.fork_context.spec; + + match &req { + RequestType::BlocksByRange(request) => { 
+ let max_allowed = spec.max_request_blocks(current_fork) as u64; + if *request.count() > max_allowed { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: Protocol::BlocksByRange, + error: RPCError::InvalidData(format!( + "requested exceeded limit. allowed: {}, requested: {}", + max_allowed, + request.count() + )), + })); + return self.shutdown(None); + } + } + RequestType::BlobsByRange(request) => { + let max_requested_blobs = request + .count + .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork)); + let max_allowed = spec.max_request_blob_sidecars(current_fork) as u64; + if max_requested_blobs > max_allowed { + self.events_out.push(HandlerEvent::Err(HandlerErr::Inbound { + id: self.current_inbound_substream_id, + proto: Protocol::BlobsByRange, + error: RPCError::InvalidData(format!( + "requested exceeded limit. allowed: {}, requested: {}", + max_allowed, max_requested_blobs + )), + })); + return self.shutdown(None); + } + } + _ => {} + }; + let max_responses = req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 958041c53f..2f6200a836 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -15,12 +15,12 @@ use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; -use types::ForkName; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; +use types::{ForkContext, ForkName}; /// Maximum length of error message. 
pub type MaxErrorLen = U256; @@ -411,6 +411,27 @@ impl OldBlocksByRangeRequest { } } +impl From for OldBlocksByRangeRequest { + fn from(req: BlocksByRangeRequest) -> Self { + match req { + BlocksByRangeRequest::V1(ref req) => { + OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { + start_slot: req.start_slot, + count: req.count, + step: 1, + }) + } + BlocksByRangeRequest::V2(ref req) => { + OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { + start_slot: req.start_slot, + count: req.count, + step: 1, + }) + } + } + } +} + /// Request a number of beacon block bodies from a peer. #[superstruct(variants(V1, V2), variant_attributes(derive(Clone, Debug, PartialEq)))] #[derive(Clone, Debug, PartialEq)] @@ -420,15 +441,19 @@ pub struct BlocksByRootRequest { } impl BlocksByRootRequest { - pub fn new(block_roots: Vec, spec: &ChainSpec) -> Self { - let block_roots = - RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize); + pub fn new(block_roots: Vec, fork_context: &ForkContext) -> Self { + let max_request_blocks = fork_context + .spec + .max_request_blocks(fork_context.current_fork()); + let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks); Self::V2(BlocksByRootRequestV2 { block_roots }) } - pub fn new_v1(block_roots: Vec, spec: &ChainSpec) -> Self { - let block_roots = - RuntimeVariableList::from_vec(block_roots, spec.max_request_blocks as usize); + pub fn new_v1(block_roots: Vec, fork_context: &ForkContext) -> Self { + let max_request_blocks = fork_context + .spec + .max_request_blocks(fork_context.current_fork()); + let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks); Self::V1(BlocksByRootRequestV1 { block_roots }) } } @@ -441,9 +466,11 @@ pub struct BlobsByRootRequest { } impl BlobsByRootRequest { - pub fn new(blob_ids: Vec, spec: &ChainSpec) -> Self { - let blob_ids = - RuntimeVariableList::from_vec(blob_ids, spec.max_request_blob_sidecars as usize); + pub fn new(blob_ids: Vec, 
fork_context: &ForkContext) -> Self { + let max_request_blob_sidecars = fork_context + .spec + .max_request_blob_sidecars(fork_context.current_fork()); + let blob_ids = RuntimeVariableList::from_vec(blob_ids, max_request_blob_sidecars); Self { blob_ids } } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 80f15c9445..eac7d67490 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -554,9 +554,11 @@ impl ProtocolId { Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), - Protocol::DataColumnsByRoot => rpc_data_column_limits::(fork_context.current_fork()), + Protocol::DataColumnsByRoot => { + rpc_data_column_limits::(fork_context.current_fork(), &fork_context.spec) + } Protocol::DataColumnsByRange => { - rpc_data_column_limits::(fork_context.current_fork()) + rpc_data_column_limits::(fork_context.current_fork(), &fork_context.spec) } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), @@ -637,13 +639,10 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -// TODO(das): fix hardcoded max here -pub fn rpc_data_column_limits(fork_name: ForkName) -> RpcLimits { +pub fn rpc_data_column_limits(fork_name: ForkName, spec: &ChainSpec) -> RpcLimits { RpcLimits::new( - DataColumnSidecar::::empty().as_ssz_bytes().len(), - DataColumnSidecar::::max_size( - E::default_spec().max_blobs_per_block_by_fork(fork_name) as usize - ), + DataColumnSidecar::::min_size(), + DataColumnSidecar::::max_size(spec.max_blobs_per_block_by_fork(fork_name) as usize), ) } diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index e0c8593f29..ae63e5cdb5 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -217,7 +217,7 @@ mod tests { use crate::rpc::rate_limiter::Quota; use crate::rpc::self_limiter::SelfRateLimiter; use crate::rpc::{Ping, Protocol, RequestType}; - use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId}; + use crate::service::api_types::{AppRequestId, RequestId, SingleLookupReqId, SyncRequestId}; use libp2p::PeerId; use std::time::Duration; use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot}; @@ -238,12 +238,16 @@ mod tests { let mut limiter: SelfRateLimiter = SelfRateLimiter::new(config, fork_context, log).unwrap(); let peer_id = PeerId::random(); + let lookup_id = 0; for i in 1..=5u32 { let _ = limiter.allows( peer_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { - id: i, + RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + id: SingleLookupReqId { + lookup_id, + req_id: i, + }, })), RequestType::Ping(Ping { data: i as u64 }), ); @@ -261,9 +265,9 @@ mod tests { for i in 2..=5u32 { assert!(matches!( iter.next().unwrap().request_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { - id, - })) if id == i + RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + id: SingleLookupReqId { req_id, .. }, + })) if req_id == i, )); } @@ -286,9 +290,9 @@ mod tests { for i in 3..=5 { assert!(matches!( iter.next().unwrap().request_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { - id - })) if id == i + RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + id: SingleLookupReqId { req_id, .. 
}, + })) if req_id == i, )); } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 85fabbb0c3..e69c7aa5f7 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,15 +1,14 @@ -use std::sync::Arc; - -use libp2p::swarm::ConnectionId; -use types::{ - BlobSidecar, DataColumnSidecar, EthSpec, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, -}; - use crate::rpc::{ methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}, SubstreamId, }; +use libp2p::swarm::ConnectionId; +use std::fmt::{Display, Formatter}; +use std::sync::Arc; +use types::{ + BlobSidecar, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, + LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, +}; /// Identifier of requests sent by a peer. pub type PeerRequestId = (ConnectionId, SubstreamId); @@ -31,8 +30,12 @@ pub enum SyncRequestId { SingleBlob { id: SingleLookupReqId }, /// Request searching for a set of data columns given a hash and list of column indices. DataColumnsByRoot(DataColumnsByRootRequestId), - /// Range request that is composed by both a block range request and a blob range request. - RangeBlockAndBlobs { id: Id }, + /// Blocks by range request + BlocksByRange(BlocksByRangeRequestId), + /// Blobs by range request + BlobsByRange(BlobsByRangeRequestId), + /// Data columns by range request + DataColumnsByRange(DataColumnsByRangeRequestId), } /// Request ID for data_columns_by_root requests. Block lookups do not issue this request directly. 
@@ -43,12 +46,60 @@ pub struct DataColumnsByRootRequestId { pub requester: DataColumnsByRootRequester, } +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct BlocksByRangeRequestId { + /// Id to identify this attempt at a blocks_by_range request for `parent_request_id` + pub id: Id, + /// The Id of the overall By Range request for block components. + pub parent_request_id: ComponentsByRangeRequestId, +} + +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct BlobsByRangeRequestId { + /// Id to identify this attempt at a blobs_by_range request for `parent_request_id` + pub id: Id, + /// The Id of the overall By Range request for block components. + pub parent_request_id: ComponentsByRangeRequestId, +} + +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct DataColumnsByRangeRequestId { + /// Id to identify this attempt at a data_columns_by_range request for `parent_request_id` + pub id: Id, + /// The Id of the overall By Range request for block components. + pub parent_request_id: ComponentsByRangeRequestId, +} + +/// Block components by range request for range sync. Includes an ID for downstream consumers to +/// handle retries and tie all their sub requests together. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct ComponentsByRangeRequestId { + /// Each `RangeRequestId` may request the same data in a later retry. This Id identifies the + /// current attempt. 
+ pub id: Id, + /// What sync component is issuing a components by range request and expecting data back + pub requester: RangeRequestId, +} + +/// Range sync chain or backfill batch +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub enum RangeRequestId { + RangeSync { chain_id: Id, batch_id: Epoch }, + BackfillSync { batch_id: Epoch }, +} + #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum DataColumnsByRootRequester { Sampling(SamplingId), Custody(CustodyId), } +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub enum RangeRequester { + RangeSync { chain_id: u64, batch_id: Epoch }, + BackfillSync { batch_id: Epoch }, +} + #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub struct SamplingId { pub id: SamplingRequester, @@ -183,9 +234,108 @@ impl slog::Value for RequestId { } } -// This custom impl reduces log boilerplate not printing `DataColumnsByRootRequestId` on each id log -impl std::fmt::Display for DataColumnsByRootRequestId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{} {:?}", self.id, self.requester) +macro_rules! impl_display { + ($structname: ty, $format: literal, $($field:ident),*) => { + impl Display for $structname { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, $format, $(self.$field,)*) + } + } + }; +} + +// Since each request Id is deeply nested with various types, if rendered with Debug on logs they +// take too much visual space. 
This custom Display implementations make the overall Id short while +// not losing information +impl_display!(BlocksByRangeRequestId, "{}/{}", id, parent_request_id); +impl_display!(BlobsByRangeRequestId, "{}/{}", id, parent_request_id); +impl_display!(DataColumnsByRangeRequestId, "{}/{}", id, parent_request_id); +impl_display!(ComponentsByRangeRequestId, "{}/{}", id, requester); +impl_display!(DataColumnsByRootRequestId, "{}/{}", id, requester); +impl_display!(SingleLookupReqId, "{}/Lookup/{}", req_id, lookup_id); +impl_display!(CustodyId, "{}", requester); +impl_display!(SamplingId, "{}/{}", sampling_request_id, id); + +impl Display for DataColumnsByRootRequester { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::Custody(id) => write!(f, "Custody/{id}"), + Self::Sampling(id) => write!(f, "Sampling/{id}"), + } + } +} + +impl Display for CustodyRequester { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Display for RangeRequestId { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::RangeSync { chain_id, batch_id } => write!(f, "RangeSync/{batch_id}/{chain_id}"), + Self::BackfillSync { batch_id } => write!(f, "BackfillSync/{batch_id}"), + } + } +} + +impl Display for SamplingRequestId { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Display for SamplingRequester { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Self::ImportedBlock(block) => write!(f, "ImportedBlock/{block}"), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn display_id_data_columns_by_root_custody() { + let id = DataColumnsByRootRequestId { + id: 123, + requester: DataColumnsByRootRequester::Custody(CustodyId { + requester: CustodyRequester(SingleLookupReqId { + req_id: 121, + lookup_id: 101, + }), + }), + }; + assert_eq!(format!("{id}"), "123/Custody/121/Lookup/101"); + } 
+ + #[test] + fn display_id_data_columns_by_root_sampling() { + let id = DataColumnsByRootRequestId { + id: 123, + requester: DataColumnsByRootRequester::Sampling(SamplingId { + id: SamplingRequester::ImportedBlock(Hash256::ZERO), + sampling_request_id: SamplingRequestId(101), + }), + }; + assert_eq!(format!("{id}"), "123/Sampling/101/ImportedBlock/0x0000000000000000000000000000000000000000000000000000000000000000"); + } + + #[test] + fn display_id_data_columns_by_range() { + let id = DataColumnsByRangeRequestId { + id: 123, + parent_request_id: ComponentsByRangeRequestId { + id: 122, + requester: RangeRequestId::RangeSync { + chain_id: 54, + batch_id: Epoch::new(0), + }, + }, + }; + assert_eq!(format!("{id}"), "123/122/RangeSync/0/54"); } } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 4738c76d0c..8586fd9cd3 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -282,7 +282,7 @@ impl Network { let max_topics = ctx.chain_spec.attestation_subnet_count as usize + SYNC_COMMITTEE_SUBNET_COUNT as usize - + ctx.chain_spec.blob_sidecar_subnet_count_electra as usize + + ctx.chain_spec.blob_sidecar_subnet_count_max() as usize + ctx.chain_spec.data_column_sidecar_subnet_count as usize + BASE_CORE_TOPICS.len() + ALTAIR_CORE_TOPICS.len() @@ -708,11 +708,17 @@ impl Network { } // Subscribe to core topics for the new fork - for kind in fork_core_topics::(&new_fork, &self.fork_context.spec) { + for kind in fork_core_topics::( + &new_fork, + &self.fork_context.spec, + &self.network_globals.as_topic_config(), + ) { let topic = GossipTopic::new(kind, GossipEncoding::default(), new_fork_digest); self.subscribe(topic); } + // TODO(das): unsubscribe from blob topics at the Fulu fork + // Register the new topics for metrics let topics_to_keep_metrics_for = attestation_sync_committee_topics::() .map(|gossip_kind| { @@ -1846,7 +1852,7 @@ impl 
Network { None } #[allow(unreachable_patterns)] - BehaviourEvent::ConnectionLimits(le) => void::unreachable(le), + BehaviourEvent::ConnectionLimits(le) => libp2p::core::util::unreachable(le), }, SwarmEvent::ConnectionEstablished { .. } => None, SwarmEvent::ConnectionClosed { .. } => None, diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 5746c13c58..72c2b29102 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -263,11 +263,7 @@ pub(crate) fn create_whitelist_filter( for id in 0..sync_committee_subnet_count { add(SyncCommitteeMessage(SyncSubnetId::new(id))); } - let blob_subnet_count = if spec.electra_fork_epoch.is_some() { - spec.blob_sidecar_subnet_count_electra - } else { - spec.blob_sidecar_subnet_count - }; + let blob_subnet_count = spec.blob_sidecar_subnet_count_max(); for id in 0..blob_subnet_count { add(BlobSidecar(id)); } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 8cce9a0f25..2800b75133 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -1,4 +1,5 @@ //! A collection of variables that are accessible outside of the network thread itself. +use super::TopicConfig; use crate::peer_manager::peerdb::PeerDB; use crate::rpc::{MetaData, MetaDataV3}; use crate::types::{BackFillState, SyncState}; @@ -183,6 +184,14 @@ impl NetworkGlobals { .collect::>() } + /// Returns the TopicConfig to compute the set of Gossip topics for a given fork + pub fn as_topic_config(&self) -> TopicConfig { + TopicConfig { + subscribe_all_data_column_subnets: self.config.subscribe_all_data_column_subnets, + sampling_subnets: &self.sampling_subnets, + } + } + /// TESTING ONLY. Build a dummy NetworkGlobals instance. 
pub fn new_test_globals( trusted_peers: Vec, @@ -223,7 +232,7 @@ mod test { fn test_sampling_subnets() { let log = logging::test_logger(); let mut spec = E::default_spec(); - spec.eip7594_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(Epoch::new(0)); let custody_group_count = spec.number_of_custody_groups / 2; let subnet_sampling_size = spec.sampling_size(custody_group_count).unwrap(); @@ -247,7 +256,7 @@ mod test { fn test_sampling_columns() { let log = logging::test_logger(); let mut spec = E::default_spec(); - spec.eip7594_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(Epoch::new(0)); let custody_group_count = spec.number_of_custody_groups / 2; let subnet_sampling_size = spec.sampling_size(custody_group_count).unwrap(); diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 30d9489225..846cf8386d 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -16,6 +16,6 @@ pub use pubsub::{PubsubMessage, SnappyTransform}; pub use subnet::{Subnet, SubnetDiscovery}; pub use topics::{ attestation_sync_committee_topics, core_topics_to_subscribe, fork_core_topics, - subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, ALTAIR_CORE_TOPICS, - BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, + subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, TopicConfig, + ALTAIR_CORE_TOPICS, BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 1e1f3efa18..c199d2312b 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -283,27 +283,15 @@ impl PubsubMessage { } GossipKind::DataColumnSidecar(subnet_id) => { match fork_context.from_context_bytes(gossip_topic.fork_digest) { - // TODO(das): 
Remove Deneb fork - Some(fork) if fork.deneb_enabled() => { + Some(fork) if fork.fulu_enabled() => { let col_sidecar = Arc::new( DataColumnSidecar::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ); - let peer_das_enabled = - fork_context.spec.is_peer_das_enabled_for_epoch( - col_sidecar.slot().epoch(E::slots_per_epoch()), - ); - if peer_das_enabled { - Ok(PubsubMessage::DataColumnSidecar(Box::new(( - *subnet_id, - col_sidecar, - )))) - } else { - Err(format!( - "data_column_sidecar topic invalid for given fork digest {:?}", - gossip_topic.fork_digest - )) - } + Ok(PubsubMessage::DataColumnSidecar(Box::new(( + *subnet_id, + col_sidecar, + )))) } Some(_) | None => Err(format!( "data_column_sidecar topic invalid for given fork digest {:?}", diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 475b459ccb..171dab09a3 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -1,5 +1,6 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; +use std::collections::HashSet; use strum::AsRefStr; use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; @@ -41,8 +42,18 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ GossipKind::LightClientOptimisticUpdate, ]; +#[derive(Debug)] +pub struct TopicConfig<'a> { + pub subscribe_all_data_column_subnets: bool, + pub sampling_subnets: &'a HashSet, +} + /// Returns the core topics associated with each fork that are new to the previous fork -pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> Vec { +pub fn fork_core_topics( + fork_name: &ForkName, + spec: &ChainSpec, + topic_config: &TopicConfig, +) -> Vec { match fork_name { ForkName::Base => BASE_CORE_TOPICS.to_vec(), ForkName::Altair => ALTAIR_CORE_TOPICS.to_vec(), @@ -51,7 +62,7 @@ pub fn fork_core_topics(fork_name: &ForkName, spec: 
&ChainSpec) -> V ForkName::Deneb => { // All of deneb blob topics are core topics let mut deneb_blob_topics = Vec::new(); - for i in 0..spec.blob_sidecar_subnet_count { + for i in 0..spec.blob_sidecar_subnet_count(ForkName::Deneb) { deneb_blob_topics.push(GossipKind::BlobSidecar(i)); } deneb_blob_topics @@ -59,12 +70,26 @@ pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> V ForkName::Electra => { // All of electra blob topics are core topics let mut electra_blob_topics = Vec::new(); - for i in 0..spec.blob_sidecar_subnet_count_electra { + for i in 0..spec.blob_sidecar_subnet_count(ForkName::Electra) { electra_blob_topics.push(GossipKind::BlobSidecar(i)); } electra_blob_topics } - ForkName::Fulu => vec![], + ForkName::Fulu => { + let mut topics = vec![]; + if topic_config.subscribe_all_data_column_subnets { + for column_subnet in 0..spec.data_column_sidecar_subnet_count { + topics.push(GossipKind::DataColumnSidecar(DataColumnSubnetId::new( + column_subnet, + ))); + } + } else { + for column_subnet in topic_config.sampling_subnets { + topics.push(GossipKind::DataColumnSidecar(*column_subnet)); + } + } + topics + } } } @@ -84,10 +109,11 @@ pub fn attestation_sync_committee_topics() -> impl Iterator( mut current_fork: ForkName, spec: &ChainSpec, + topic_config: &TopicConfig, ) -> Vec { - let mut topics = fork_core_topics::(¤t_fork, spec); + let mut topics = fork_core_topics::(¤t_fork, spec, topic_config); while let Some(previous_fork) = current_fork.previous_fork() { - let previous_fork_topics = fork_core_topics::(&previous_fork, spec); + let previous_fork_topics = fork_core_topics::(&previous_fork, spec, topic_config); topics.extend(previous_fork_topics); current_fork = previous_fork; } @@ -475,8 +501,15 @@ mod tests { type E = MainnetEthSpec; let spec = E::default_spec(); let mut all_topics = Vec::new(); - let mut electra_core_topics = fork_core_topics::(&ForkName::Electra, &spec); - let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb, 
&spec); + let topic_config = TopicConfig { + subscribe_all_data_column_subnets: false, + sampling_subnets: &HashSet::from_iter([1, 2].map(DataColumnSubnetId::new)), + }; + let mut fulu_core_topics = fork_core_topics::(&ForkName::Fulu, &spec, &topic_config); + let mut electra_core_topics = + fork_core_topics::(&ForkName::Electra, &spec, &topic_config); + let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb, &spec, &topic_config); + all_topics.append(&mut fulu_core_topics); all_topics.append(&mut electra_core_topics); all_topics.append(&mut deneb_core_topics); all_topics.extend(CAPELLA_CORE_TOPICS); @@ -484,7 +517,7 @@ mod tests { all_topics.extend(BASE_CORE_TOPICS); let latest_fork = *ForkName::list_all().last().unwrap(); - let core_topics = core_topics_to_subscribe::(latest_fork, &spec); + let core_topics = core_topics_to_subscribe::(latest_fork, &spec, &topic_config); // Need to check all the topics exist in an order independent manner for topic in all_topics { assert!(core_topics.contains(&topic)); diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index f721c8477c..4b54a24ddc 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -16,7 +16,7 @@ use tokio::time::sleep; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec, EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, MinimalEthSpec, - Signature, SignedBeaconBlock, Slot, + RuntimeVariableList, Signature, SignedBeaconBlock, Slot, }; type E = MinimalEthSpec; @@ -810,17 +810,20 @@ fn test_tcp_blocks_by_root_chunked_rpc() { .await; // BlocksByRoot Request - let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( - vec![ - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - ], - &spec, - )); + let rpc_request = + 
RequestType::BlocksByRoot(BlocksByRootRequest::V2(BlocksByRootRequestV2 { + block_roots: RuntimeVariableList::from_vec( + vec![ + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + ], + spec.max_request_blocks_upper_bound(), + ), + })); // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); @@ -953,21 +956,24 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { .await; // BlocksByRoot Request - let rpc_request = RequestType::BlocksByRoot(BlocksByRootRequest::new( - vec![ - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - Hash256::zero(), - ], - &spec, - )); + let rpc_request = + RequestType::BlocksByRoot(BlocksByRootRequest::V2(BlocksByRootRequestV2 { + block_roots: RuntimeVariableList::from_vec( + vec![ + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + Hash256::zero(), + ], + spec.max_request_blocks_upper_bound(), + ), + })); // BlocksByRoot Response let full_block = BeaconBlock::Base(BeaconBlockBase::::full(&spec)); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 44f6c54bbc..09179c4a51 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -31,7 +31,7 @@ execution_layer = { workspace = true } fnv = { workspace = true } futures = { workspace = true } hex = { workspace = true } -igd-next = "0.14" +igd-next = { version = "0.16", features = ["aio_tokio"] } itertools = { workspace = true } lighthouse_network = { workspace = true } logging = { workspace = true } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 6b5753e96a..090b963cbc 100644 --- 
a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -14,6 +14,7 @@ use beacon_chain::{ light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, observed_operations::ObservationOutcome, + single_attestation::single_attestation_to_attestation, sync_committee_verification::{self, Error as SyncCommitteeError}, validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, @@ -32,12 +33,12 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - beacon_block::BlockImportSource, Attestation, AttestationRef, AttesterSlashing, BlobSidecar, - DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + beacon_block::BlockImportSource, Attestation, AttestationData, AttestationRef, + AttesterSlashing, BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, + IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, - SyncSubnetId, + SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; use beacon_processor::{ @@ -45,7 +46,7 @@ use beacon_processor::{ QueuedAggregate, QueuedGossipBlock, QueuedLightClientUpdate, QueuedUnaggregate, ReprocessQueueMessage, }, - DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, + DuplicateCache, GossipAggregatePackage, GossipAttestationBatch, }; /// Set to `true` to introduce stricter 
penalties for peers who send some types of late consensus @@ -127,6 +128,11 @@ enum FailedAtt { should_import: bool, seen_timestamp: Duration, }, + // This variant is just a dummy variant for now, as SingleAttestation reprocessing is handled + // separately. + SingleUnaggregate { + attestation: Box, + }, Aggregate { attestation: Box>, seen_timestamp: Duration, @@ -135,20 +141,22 @@ enum FailedAtt { impl FailedAtt { pub fn beacon_block_root(&self) -> &Hash256 { - &self.attestation().data().beacon_block_root + &self.attestation_data().beacon_block_root } pub fn kind(&self) -> &'static str { match self { FailedAtt::Unaggregate { .. } => "unaggregated", + FailedAtt::SingleUnaggregate { .. } => "unaggregated", FailedAtt::Aggregate { .. } => "aggregated", } } - pub fn attestation(&self) -> AttestationRef { + pub fn attestation_data(&self) -> &AttestationData { match self { - FailedAtt::Unaggregate { attestation, .. } => attestation.to_ref(), - FailedAtt::Aggregate { attestation, .. } => attestation.message().aggregate(), + FailedAtt::Unaggregate { attestation, .. } => attestation.data(), + FailedAtt::SingleUnaggregate { attestation, .. } => &attestation.data, + FailedAtt::Aggregate { attestation, .. } => attestation.message().aggregate().data(), } } } @@ -229,7 +237,7 @@ impl NetworkBeaconProcessor { pub fn process_gossip_attestation_batch( self: Arc, - packages: Vec>, + packages: GossipAttestationBatch, reprocess_tx: Option>, ) { let attestations_and_subnets = packages @@ -399,6 +407,155 @@ impl NetworkBeaconProcessor { } } + /// Process an unaggregated attestation requiring conversion. + /// + /// This function performs the conversion, and if successfull queues a new message to be + /// processed by `process_gossip_attestation`. If unsuccessful due to block unavailability, + /// a retry message will be pushed to the `reprocess_tx` if it is `Some`. 
+ #[allow(clippy::too_many_arguments)] + pub fn process_gossip_attestation_to_convert( + self: Arc, + message_id: MessageId, + peer_id: PeerId, + single_attestation: Box, + subnet_id: SubnetId, + should_import: bool, + reprocess_tx: Option>, + seen_timestamp: Duration, + ) { + let conversion_result = self.chain.with_committee_cache( + single_attestation.data.target.root, + single_attestation + .data + .slot + .epoch(T::EthSpec::slots_per_epoch()), + |committee_cache, _| { + let slot = single_attestation.data.slot; + let committee_index = single_attestation.committee_index; + let Some(committee) = committee_cache.get_beacon_committee(slot, committee_index) + else { + return Ok(Err(AttnError::NoCommitteeForSlotAndIndex { + slot, + index: committee_index, + })); + }; + + Ok(single_attestation_to_attestation( + &single_attestation, + committee.committee, + )) + }, + ); + + match conversion_result { + Ok(Ok(attestation)) => { + let slot = attestation.data().slot; + if let Err(e) = self.send_unaggregated_attestation( + message_id.clone(), + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + ) { + error!( + &self.log, + "Unable to queue converted SingleAttestation"; + "error" => %e, + "slot" => slot, + ); + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } + // Outermost error (from `with_committee_cache`) indicating that the block is not known + // and that this conversion should be retried. + Err(BeaconChainError::MissingBeaconBlock(beacon_block_root)) => { + if let Some(sender) = reprocess_tx { + metrics::inc_counter( + &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_REQUEUED_TOTAL, + ); + // We don't know the block, get the sync manager to handle the block lookup, and + // send the attestation to be scheduled for re-processing. 
+ self.sync_tx + .send(SyncMessage::UnknownBlockHashFromAttestation( + peer_id, + beacon_block_root, + )) + .unwrap_or_else(|_| { + warn!( + self.log, + "Failed to send to sync service"; + "msg" => "UnknownBlockHash" + ) + }); + let processor = self.clone(); + // Do not allow this attestation to be re-processed beyond this point. + let reprocess_msg = + ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate { + beacon_block_root, + process_fn: Box::new(move || { + processor.process_gossip_attestation_to_convert( + message_id, + peer_id, + single_attestation, + subnet_id, + should_import, + None, + seen_timestamp, + ) + }), + }); + if sender.try_send(reprocess_msg).is_err() { + error!( + self.log, + "Failed to send attestation for re-processing"; + ) + } + } else { + // We shouldn't make any further attempts to process this attestation. + // + // Don't downscore the peer since it's not clear if we requested this head + // block from them or not. + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); + } + } + Ok(Err(error)) => { + // We already handled reprocessing above so do not attempt it in the error handler. + self.handle_attestation_verification_failure( + peer_id, + message_id, + FailedAtt::SingleUnaggregate { + attestation: single_attestation, + }, + None, + error, + seen_timestamp, + ); + } + Err(error) => { + // We already handled reprocessing above so do not attempt it in the error handler. + self.handle_attestation_verification_failure( + peer_id, + message_id, + FailedAtt::SingleUnaggregate { + attestation: single_attestation, + }, + None, + AttnError::BeaconChainError(error), + seen_timestamp, + ); + } + } + } + /// Process the aggregated attestation received from the gossip network and: /// /// - If it passes gossip propagation criteria, tell the network thread to forward it. @@ -1290,13 +1447,12 @@ impl NetworkBeaconProcessor { Err(e @ BlockError::StateRootMismatch { .. 
}) | Err(e @ BlockError::IncorrectBlockProposer { .. }) | Err(e @ BlockError::BlockSlotLimitReached) - | Err(e @ BlockError::ProposalSignatureInvalid) | Err(e @ BlockError::NonLinearSlots) | Err(e @ BlockError::UnknownValidator(_)) | Err(e @ BlockError::PerBlockProcessingError(_)) | Err(e @ BlockError::NonLinearParentRoots) | Err(e @ BlockError::BlockIsNotLaterThanParent { .. }) - | Err(e @ BlockError::InvalidSignature) + | Err(e @ BlockError::InvalidSignature(_)) | Err(e @ BlockError::WeakSubjectivityConflict) | Err(e @ BlockError::InconsistentFork(_)) | Err(e @ BlockError::ExecutionPayloadError(_)) @@ -2208,9 +2364,9 @@ impl NetworkBeaconProcessor { // network. let seen_clock = &self.chain.slot_clock.freeze_at(seen_timestamp); let hindsight_verification = - attestation_verification::verify_propagation_slot_range( + attestation_verification::verify_propagation_slot_range::<_, T::EthSpec>( seen_clock, - failed_att.attestation(), + failed_att.attestation_data(), &self.chain.spec, ); @@ -2295,6 +2451,19 @@ impl NetworkBeaconProcessor { "attn_agg_not_in_committee", ); } + AttnError::AttesterNotInCommittee { .. } => { + /* + * `SingleAttestation` from a validator is invalid because the `attester_index` is + * not in the claimed committee. There is no reason a non-faulty validator would + * send this message. + */ + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::LowToleranceError, + "attn_single_not_in_committee", + ); + } AttnError::AttestationSupersetKnown { .. } => { /* * The aggregate attestation has already been observed on the network or in @@ -2440,6 +2609,17 @@ impl NetworkBeaconProcessor { }), }) } + FailedAtt::SingleUnaggregate { .. } => { + // This should never happen, as we handle the unknown head block case + // for `SingleAttestation`s separately and should not be able to hit + // an `UnknownHeadBlock` error. 
+ error!( + self.log, + "Dropping SingleAttestation instead of requeueing"; + "block_root" => ?beacon_block_root, + ); + return; + } FailedAtt::Unaggregate { attestation, subnet_id, @@ -2662,7 +2842,7 @@ impl NetworkBeaconProcessor { self.log, "Ignored attestation to finalized block"; "block_root" => ?beacon_block_root, - "attestation_slot" => failed_att.attestation().data().slot, + "attestation_slot" => failed_att.attestation_data().slot, ); self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); @@ -2685,9 +2865,9 @@ impl NetworkBeaconProcessor { debug!( self.log, "Dropping attestation"; - "target_root" => ?failed_att.attestation().data().target.root, + "target_root" => ?failed_att.attestation_data().target.root, "beacon_block_root" => ?beacon_block_root, - "slot" => ?failed_att.attestation().data().slot, + "slot" => ?failed_att.attestation_data().slot, "type" => ?attestation_type, "error" => ?e, "peer_id" => % peer_id @@ -2706,7 +2886,7 @@ impl NetworkBeaconProcessor { self.log, "Unable to validate attestation"; "beacon_block_root" => ?beacon_block_root, - "slot" => ?failed_att.attestation().data().slot, + "slot" => ?failed_att.attestation_data().slot, "type" => ?attestation_type, "peer_id" => %peer_id, "error" => ?e, @@ -3107,9 +3287,9 @@ impl NetworkBeaconProcessor { message_id: MessageId, peer_id: PeerId, ) { - let is_timely = attestation_verification::verify_propagation_slot_range( + let is_timely = attestation_verification::verify_propagation_slot_range::<_, T::EthSpec>( &self.chain.slot_clock, - attestation, + attestation.data(), &self.chain.spec, ) .is_ok(); diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 4a3fb28e10..c06a1f6ee3 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -94,46 +94,34 @@ impl NetworkBeaconProcessor { should_import: bool, seen_timestamp: 
Duration, ) -> Result<(), Error> { - let result = self.chain.with_committee_cache( - single_attestation.data.target.root, - single_attestation - .data - .slot - .epoch(T::EthSpec::slots_per_epoch()), - |committee_cache, _| { - let Some(committee) = committee_cache.get_beacon_committee( - single_attestation.data.slot, - single_attestation.committee_index as u64, - ) else { - warn!( - self.log, - "No beacon committee for slot and index"; - "slot" => single_attestation.data.slot, - "index" => single_attestation.committee_index - ); - return Ok(Ok(())); - }; + let processor = self.clone(); + let process_individual = move |package: GossipAttestationPackage| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_attestation_to_convert( + package.message_id, + package.peer_id, + package.attestation, + package.subnet_id, + package.should_import, + Some(reprocess_tx), + package.seen_timestamp, + ) + }; - let attestation = single_attestation.to_attestation(committee.committee)?; - - Ok(self.send_unaggregated_attestation( - message_id.clone(), + self.try_send(BeaconWorkEvent { + drop_during_sync: true, + work: Work::GossipAttestationToConvert { + attestation: Box::new(GossipAttestationPackage { + message_id, peer_id, - attestation, + attestation: Box::new(single_attestation), subnet_id, should_import, seen_timestamp, - )) + }), + process_individual: Box::new(process_individual), }, - ); - - match result { - Ok(result) => result, - Err(e) => { - warn!(self.log, "Failed to send SingleAttestation"; "error" => ?e); - Ok(()) - } - } + }) } /// Create a new `Work` event for some unaggregated attestation. @@ -148,18 +136,19 @@ impl NetworkBeaconProcessor { ) -> Result<(), Error> { // Define a closure for processing individual attestations. 
let processor = self.clone(); - let process_individual = move |package: GossipAttestationPackage| { - let reprocess_tx = processor.reprocess_tx.clone(); - processor.process_gossip_attestation( - package.message_id, - package.peer_id, - package.attestation, - package.subnet_id, - package.should_import, - Some(reprocess_tx), - package.seen_timestamp, - ) - }; + let process_individual = + move |package: GossipAttestationPackage>| { + let reprocess_tx = processor.reprocess_tx.clone(); + processor.process_gossip_attestation( + package.message_id, + package.peer_id, + package.attestation, + package.subnet_id, + package.should_import, + Some(reprocess_tx), + package.seen_timestamp, + ) + }; // Define a closure for processing batches of attestations. let processor = self.clone(); @@ -613,6 +602,11 @@ impl NetworkBeaconProcessor { blocks: Vec>, ) -> Result<(), Error> { let is_backfill = matches!(&process_id, ChainSegmentProcessId::BackSyncBatchId { .. }); + debug!(self.log, "Batch sending for process"; + "blocks" => blocks.len(), + "id" => ?process_id, + ); + let processor = self.clone(); let process_fn = async move { let notify_execution_layer = if processor diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index b4f19f668d..67a1570275 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -659,24 +659,6 @@ impl NetworkBeaconProcessor { "start_slot" => req.start_slot(), ); - // Should not send more than max request blocks - let max_request_size = - self.chain - .epoch() - .map_or(self.chain.spec.max_request_blocks, |epoch| { - if self.chain.spec.fork_name_at_epoch(epoch).deneb_enabled() { - self.chain.spec.max_request_blocks_deneb - } else { - self.chain.spec.max_request_blocks - } - }); - if *req.count() > max_request_size { - return Err(( - RpcErrorResponse::InvalidRequest, - "Request 
exceeded max size", - )); - } - let forwards_block_root_iter = match self .chain .forwards_iter_block_roots(Slot::from(*req.start_slot())) diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 817e6b6440..338f2bc4c8 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -483,6 +483,7 @@ impl NetworkBeaconProcessor { debug!(self.log, "Backfill batch processed"; "batch_epoch" => epoch, "first_block_slot" => start_slot, + "keep_execution_payload" => !self.chain.store.get_config().prune_payloads, "last_block_slot" => end_slot, "processed_blocks" => sent_blocks, "processed_blobs" => n_blobs, diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 8238fa146d..8415ece638 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -15,7 +15,7 @@ use beacon_chain::test_utils::{ use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; use lighthouse_network::discovery::ConnectionId; -use lighthouse_network::rpc::methods::BlobsByRangeRequest; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MetaDataV3}; use lighthouse_network::rpc::{RequestId, SubstreamId}; use lighthouse_network::{ discv5::enr::{self, CombinedKey}, @@ -198,11 +198,21 @@ impl TestRig { let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); // Default metadata - let meta_data = MetaData::V2(MetaDataV2 { - seq_number: SEQ_NUMBER, - attnets: EnrAttestationBitfield::::default(), - syncnets: EnrSyncCommitteeBitfield::::default(), - }); + let meta_data = if spec.is_peer_das_scheduled() { + MetaData::V3(MetaDataV3 { + seq_number: SEQ_NUMBER, + attnets: EnrAttestationBitfield::::default(), + syncnets: 
EnrSyncCommitteeBitfield::::default(), + custody_group_count: spec.custody_requirement, + }) + } else { + MetaData::V2(MetaDataV2 { + seq_number: SEQ_NUMBER, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }) + }; + let enr_key = CombinedKey::generate_secp256k1(); let enr = enr::Enr::builder().build(&enr_key).unwrap(); let network_config = Arc::new(NetworkConfig::default()); @@ -342,6 +352,7 @@ impl TestRig { ) .unwrap(); } + pub fn enqueue_single_lookup_rpc_blobs(&self) { if let Some(blobs) = self.next_blobs.clone() { let blobs = FixedBlobSidecarList::new(blobs.into_iter().map(Some).collect::>()); @@ -350,7 +361,7 @@ impl TestRig { self.next_block.canonical_root(), blobs, std::time::Duration::default(), - BlockProcessType::SingleBlock { id: 1 }, + BlockProcessType::SingleBlob { id: 1 }, ) .unwrap(); } diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index d3da341e1c..36e5c391e9 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -28,7 +28,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, SignedBeaconBlock}; /// Handles messages from the network and routes them to the appropriate service to be handled. 
pub struct Router { @@ -90,6 +90,7 @@ impl Router { invalid_block_storage: InvalidBlockStorage, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, + fork_context: Arc, log: slog::Logger, ) -> Result>, String> { let message_handler_log = log.new(o!("service"=> "router")); @@ -122,6 +123,7 @@ impl Router { network_send.clone(), network_beacon_processor.clone(), sync_recv, + fork_context, sync_logger, ); @@ -622,7 +624,7 @@ impl Router { ) { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { - id @ SyncRequestId::RangeBlockAndBlobs { .. } => id, + id @ SyncRequestId::BlocksByRange { .. } => id, other => { crit!(self.log, "BlocksByRange response on incorrect request"; "request" => ?other); return; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index f89241b4ae..1b2a681c64 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -33,8 +33,8 @@ use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use types::{ - ChainSpec, DataColumnSubnetId, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, - SyncSubnetId, Unsigned, ValidatorSubscription, + ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, + Unsigned, ValidatorSubscription, }; mod tests; @@ -181,8 +181,6 @@ pub struct NetworkService { next_fork_subscriptions: Pin>>, /// A delay that expires when we need to unsubscribe from old fork topics. next_unsubscribe: Pin>>, - /// Subscribe to all the data column subnets. - subscribe_all_data_column_subnets: bool, /// Subscribe to all the subnets once synced. subscribe_all_subnets: bool, /// Shutdown beacon node after sync is complete. 
@@ -312,6 +310,7 @@ impl NetworkService { invalid_block_storage, beacon_processor_send, beacon_processor_reprocess_tx, + fork_context.clone(), network_log.clone(), )?; @@ -348,7 +347,6 @@ impl NetworkService { next_fork_update, next_fork_subscriptions, next_unsubscribe, - subscribe_all_data_column_subnets: config.subscribe_all_data_column_subnets, subscribe_all_subnets: config.subscribe_all_subnets, shutdown_after_sync: config.shutdown_after_sync, metrics_enabled: config.metrics_enabled, @@ -716,6 +714,7 @@ impl NetworkService { for topic_kind in core_topics_to_subscribe::( self.fork_context.current_fork(), &self.fork_context.spec, + &self.network_globals.as_topic_config(), ) { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new( @@ -750,15 +749,6 @@ impl NetworkService { } } - // TODO(das): This is added here for the purpose of testing, *without* having to - // activate Electra. This should happen as part of the Electra upgrade and we should - // move the subscription logic once it's ready to rebase PeerDAS on Electra, or if - // we decide to activate via the soft fork route: - // https://github.com/sigp/lighthouse/pull/5899 - if self.fork_context.spec.is_peer_das_scheduled() { - self.subscribe_to_peer_das_topics(&mut subscribed_topics); - } - // If we are to subscribe to all subnets we do it here if self.subscribe_all_subnets { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { @@ -805,37 +795,6 @@ impl NetworkService { } } - fn subscribe_to_peer_das_topics(&mut self, subscribed_topics: &mut Vec) { - if self.subscribe_all_data_column_subnets { - for column_subnet in 0..self.fork_context.spec.data_column_sidecar_subnet_count { - for fork_digest in self.required_gossip_fork_digests() { - let gossip_kind = - Subnet::DataColumn(DataColumnSubnetId::new(column_subnet)).into(); - let topic = - GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); - if self.libp2p.subscribe(topic.clone()) { - 
subscribed_topics.push(topic); - } else { - warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - } else { - for column_subnet in &self.network_globals.sampling_subnets { - for fork_digest in self.required_gossip_fork_digests() { - let gossip_kind = Subnet::DataColumn(*column_subnet).into(); - let topic = - GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); - if self.libp2p.subscribe(topic.clone()) { - subscribed_topics.push(topic); - } else { - warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); - } - } - } - } - } - /// Handle a message sent to the network service. async fn on_validator_subscription_msg(&mut self, msg: ValidatorSubscriptionMessage) { match msg { @@ -951,6 +910,7 @@ impl NetworkService { let core_topics = core_topics_to_subscribe::( self.fork_context.current_fork(), &self.fork_context.spec, + &self.network_globals.as_topic_config(), ); let core_topics: HashSet<&GossipKind> = HashSet::from_iter(&core_topics); let subscriptions = self.network_globals.gossipsub_subscriptions.read(); diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index 33ae567eb3..de90e22254 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -216,6 +216,12 @@ impl SubnetService { || self.permanent_attestation_subscriptions.contains(subnet) } + /// Returns whether we are subscribed to a permanent subnet for testing purposes. + #[cfg(test)] + pub(crate) fn is_subscribed_permanent(&self, subnet: &Subnet) -> bool { + self.permanent_attestation_subscriptions.contains(subnet) + } + /// Processes a list of validator subscriptions. /// /// This is fundamentally called form the HTTP API when a validator requests duties from us @@ -629,9 +635,10 @@ impl Stream for SubnetService { // expire subscription. 
match self.scheduled_subscriptions.poll_next_unpin(cx) { Poll::Ready(Some(Ok(exact_subnet))) => { - let ExactSubnet { subnet, .. } = exact_subnet; - let current_slot = self.beacon_chain.slot_clock.now().unwrap_or_default(); - if let Err(e) = self.subscribe_to_subnet_immediately(subnet, current_slot + 1) { + let ExactSubnet { subnet, slot } = exact_subnet; + // Set the `end_slot` for the subscription to be `duty.slot + 1` so that we unsubscribe + // only at the end of the duty slot. + if let Err(e) = self.subscribe_to_subnet_immediately(subnet, slot + 1) { debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet, "err" => e); } self.waker diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 7283b4af31..0f3343df63 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -7,9 +7,6 @@ use beacon_chain::{ }; use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::NetworkConfig; -use logging::test_logger; -use slog::{o, Drain, Logger}; -use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::sync::{Arc, LazyLock}; use std::time::{Duration, SystemTime}; @@ -21,10 +18,6 @@ use types::{ SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, }; -// Set to enable/disable logging -// const TEST_LOG_LEVEL: Option = Some(slog::Level::Debug); -const TEST_LOG_LEVEL: Option = None; - const SLOT_DURATION_MILLIS: u64 = 400; type TestBeaconChainType = Witness< @@ -46,7 +39,7 @@ impl TestBeaconChain { let keypairs = generate_deterministic_keypairs(1); - let log = get_logger(TEST_LOG_LEVEL); + let log = logging::test_logger(); let store = HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap(); @@ -98,28 +91,10 @@ pub fn recent_genesis_time() -> u64 { .as_secs() } -fn 
get_logger(log_level: Option) -> Logger { - if let Some(level) = log_level { - let drain = { - let decorator = slog_term::TermDecorator::new().build(); - let decorator = - logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).chan_size(2048).build(); - drain.filter_level(level) - }; - - Logger::root(drain.fuse(), o!()) - } else { - let builder = NullLoggerBuilder; - builder.build().expect("should build logger") - } -} - static CHAIN: LazyLock = LazyLock::new(TestBeaconChain::new_with_system_clock); fn get_subnet_service() -> SubnetService { - let log = test_logger(); + let log = logging::test_logger(); let config = NetworkConfig::default(); let beacon_chain = CHAIN.chain.clone(); @@ -501,8 +476,6 @@ mod test { let committee_count = 1; // Makes 3 validator subscriptions to the same subnet but at different slots. - // There should be just 1 unsubscription event for each of the later slots subscriptions - // (subscription_slot2 and subscription_slot3). 
let subscription_slot1 = 0; let subscription_slot2 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; let subscription_slot3 = subscription_slot2 * 2; @@ -585,7 +558,7 @@ mod test { let expected_unsubscription = SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1)); - if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!(expected_subscription, events[0]); assert_eq!(expected_unsubscription, events[2]); } @@ -607,9 +580,18 @@ mod test { assert_eq!(no_events, []); - let second_subscribe_event = get_events(&mut subnet_service, None, 2).await; + let subscription_end_slot = current_slot + subscription_slot2 + 2; // +1 to get to the end of the duty slot, +1 for the slot to complete + let wait_slots = subnet_service + .beacon_chain + .slot_clock + .duration_to_slot(subscription_end_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; + + let second_subscribe_event = get_events(&mut subnet_service, None, wait_slots as u32).await; // If the permanent and short lived subnets are different, we should get an unsubscription event. 
- if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!( [ expected_subscription.clone(), @@ -633,9 +615,18 @@ mod test { assert_eq!(no_events, []); - let third_subscribe_event = get_events(&mut subnet_service, None, 2).await; + let subscription_end_slot = current_slot + subscription_slot3 + 2; // +1 to get to the end of the duty slot, +1 for the slot to complete + let wait_slots = subnet_service + .beacon_chain + .slot_clock + .duration_to_slot(subscription_end_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; - if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + let third_subscribe_event = get_events(&mut subnet_service, None, wait_slots as u32).await; + + if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!( [expected_subscription, expected_unsubscription], third_subscribe_event[..] diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index 5703ed3504..4220f85fc3 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -388,67 +388,43 @@ impl BackFillSync { blocks: Vec>, ) -> Result { // check if we have this batch - let batch = match self.batches.get_mut(&batch_id) { - None => { - if !matches!(self.state(), BackFillState::Failed) { - // A batch might get removed when the chain advances, so this is non fatal. - debug!(self.log, "Received a block for unknown batch"; "epoch" => batch_id); - } - return Ok(ProcessResult::Successful); - } - Some(batch) => { - // A batch could be retried without the peer failing the request (disconnecting/ - // sending an error /timeout) if the peer is removed from the chain for other - // reasons. 
Check that this block belongs to the expected peer, and that the - // request_id matches - // TODO(das): removed peer_id matching as the node may request a different peer for data - // columns. - if !batch.is_expecting_block(&request_id) { - return Ok(ProcessResult::Successful); - } - batch + let Some(batch) = self.batches.get_mut(&batch_id) else { + if !matches!(self.state(), BackFillState::Failed) { + // A batch might get removed when the chain advances, so this is non fatal. + debug!(self.log, "Received a block for unknown batch"; "epoch" => batch_id); } + return Ok(ProcessResult::Successful); }; - { - // A stream termination has been sent. This batch has ended. Process a completed batch. - // Remove the request from the peer's active batches - self.active_requests - .get_mut(peer_id) - .map(|active_requests| active_requests.remove(&batch_id)); + // A batch could be retried without the peer failing the request (disconnecting/ + // sending an error /timeout) if the peer is removed from the chain for other + // reasons. Check that this block belongs to the expected peer, and that the + // request_id matches + // TODO(das): removed peer_id matching as the node may request a different peer for data + // columns. + if !batch.is_expecting_block(&request_id) { + return Ok(ProcessResult::Successful); + } - match batch.download_completed(blocks) { - Ok(received) => { - let awaiting_batches = - self.processing_target.saturating_sub(batch_id) / BACKFILL_EPOCHS_PER_BATCH; - debug!(self.log, "Completed batch received"; "epoch" => batch_id, "blocks" => received, "awaiting_batches" => awaiting_batches); + // A stream termination has been sent. This batch has ended. Process a completed batch. 
+ // Remove the request from the peer's active batches + self.active_requests + .get_mut(peer_id) + .map(|active_requests| active_requests.remove(&batch_id)); - // pre-emptively request more blocks from peers whilst we process current blocks, - self.request_batches(network)?; - self.process_completed_batches(network) - } - Err(result) => { - let (expected_boundary, received_boundary, outcome) = match result { - Err(e) => { - return self - .fail_sync(BackFillError::BatchInvalidState(batch_id, e.0)) - .map(|_| ProcessResult::Successful); - } - Ok(v) => v, - }; - warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, - "peer_id" => %peer_id, batch); + match batch.download_completed(blocks) { + Ok(received) => { + let awaiting_batches = + self.processing_target.saturating_sub(batch_id) / BACKFILL_EPOCHS_PER_BATCH; + debug!(self.log, "Completed batch received"; "epoch" => batch_id, "blocks" => received, "awaiting_batches" => awaiting_batches); - if let BatchOperationOutcome::Failed { blacklist: _ } = outcome { - error!(self.log, "Backfill failed"; "epoch" => batch_id, "received_boundary" => received_boundary, "expected_boundary" => expected_boundary); - return self - .fail_sync(BackFillError::BatchDownloadFailed(batch_id)) - .map(|_| ProcessResult::Successful); - } - // this batch can't be used, so we need to request it again. 
- self.retry_batch_download(network, batch_id) - .map(|_| ProcessResult::Successful) - } + // pre-emptively request more blocks from peers whilst we process current blocks, + self.request_batches(network)?; + self.process_completed_batches(network) + } + Err(e) => { + self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; + Ok(ProcessResult::Successful) } } } @@ -582,20 +558,16 @@ impl BackFillSync { } }; - let peer = match batch.current_peer() { - Some(v) => *v, - None => { - return self - .fail_sync(BackFillError::BatchInvalidState( - batch_id, - String::from("Peer does not exist"), - )) - .map(|_| ProcessResult::Successful) - } + let Some(peer) = batch.current_peer() else { + self.fail_sync(BackFillError::BatchInvalidState( + batch_id, + String::from("Peer does not exist"), + ))?; + return Ok(ProcessResult::Successful); }; debug!(self.log, "Backfill batch processed"; "result" => ?result, &batch, - "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(&peer)); + "batch_epoch" => batch_id, "peer" => %peer, "client" => %network.client_type(peer)); match result { BatchProcessResult::Success { @@ -679,8 +651,8 @@ impl BackFillSync { { self.fail_sync(BackFillError::BatchInvalidState(batch_id, e.0))?; } - self.retry_batch_download(network, batch_id) - .map(|_| ProcessResult::Successful) + self.retry_batch_download(network, batch_id)?; + Ok(ProcessResult::Successful) } } } @@ -712,11 +684,10 @@ impl BackFillSync { // - AwaitingDownload -> A recoverable failed batch should have been // re-requested. 
// - Processing -> `self.current_processing_batch` is None - return self - .fail_sync(BackFillError::InvalidSyncState(String::from( - "Invalid expected batch state", - ))) - .map(|_| ProcessResult::Successful); + self.fail_sync(BackFillError::InvalidSyncState(String::from( + "Invalid expected batch state", + )))?; + return Ok(ProcessResult::Successful); } BatchState::AwaitingValidation(_) => { // TODO: I don't think this state is possible, log a CRIT just in case. @@ -731,12 +702,11 @@ impl BackFillSync { } } } else { - return self - .fail_sync(BackFillError::InvalidSyncState(format!( - "Batch not found for current processing target {}", - self.processing_target - ))) - .map(|_| ProcessResult::Successful); + self.fail_sync(BackFillError::InvalidSyncState(format!( + "Batch not found for current processing target {}", + self.processing_target + )))?; + return Ok(ProcessResult::Successful); } Ok(ProcessResult::Successful) } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index ac4df42a4e..2172c8dcd8 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -36,6 +36,7 @@ use beacon_chain::data_availability_checker::{ use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError}; pub use common::RequestState; use fnv::FnvHashMap; +use itertools::Itertools; use lighthouse_network::service::api_types::SingleLookupReqId; use lighthouse_network::{PeerAction, PeerId}; use lru_cache::LRUTimeCache; @@ -644,8 +645,15 @@ impl BlockLookups { // but future errors may follow the same pattern. Generalize this // pattern with https://github.com/sigp/lighthouse/pull/6321 BlockError::AvailabilityCheck( - AvailabilityCheckError::InvalidColumn(index, _), - ) => peer_group.of_index(index as usize).collect(), + AvailabilityCheckError::InvalidColumn(errors), + ) => errors + .iter() + // Collect all peers that sent a column that was invalid. 
Must + // run .unique as a single peer can send multiple invalid + // columns. Penalize once to avoid insta-bans + .flat_map(|(index, _)| peer_group.of_index((*index) as usize)) + .unique() + .collect(), _ => peer_group.all().collect(), }; for peer in peers_to_penalize { diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 7a234eaef0..6c8a8eab63 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,7 +1,6 @@ use beacon_chain::{ block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root, }; -use lighthouse_network::PeerId; use std::{ collections::{HashMap, VecDeque}, sync::Arc, @@ -29,9 +28,6 @@ pub struct RangeBlockComponentsRequest { /// Used to determine if the number of data columns stream termination this accumulator should /// wait for. This may be less than the number of `expects_custody_columns` due to request batching. num_custody_column_requests: Option, - /// The peers the request was made to. - pub(crate) peer_ids: Vec, - max_blobs_per_block: usize, } impl RangeBlockComponentsRequest { @@ -39,8 +35,6 @@ impl RangeBlockComponentsRequest { expects_blobs: bool, expects_custody_columns: Option>, num_custody_column_requests: Option, - peer_ids: Vec, - max_blobs_per_block: usize, ) -> Self { Self { blocks: <_>::default(), @@ -52,50 +46,42 @@ impl RangeBlockComponentsRequest { expects_blobs, expects_custody_columns, num_custody_column_requests, - peer_ids, - max_blobs_per_block, } } - // TODO: This function should be deprecated when simplying the retry mechanism of this range - // requests. 
- pub fn get_requirements(&self) -> (bool, Option>) { - (self.expects_blobs, self.expects_custody_columns.clone()) + pub fn add_blocks(&mut self, blocks: Vec>>) { + for block in blocks { + self.blocks.push_back(block); + } + self.is_blocks_stream_terminated = true; } - pub fn add_block_response(&mut self, block_opt: Option>>) { - match block_opt { - Some(block) => self.blocks.push_back(block), - None => self.is_blocks_stream_terminated = true, + pub fn add_blobs(&mut self, blobs: Vec>>) { + for blob in blobs { + self.blobs.push_back(blob); } + self.is_sidecars_stream_terminated = true; } - pub fn add_sidecar_response(&mut self, sidecar_opt: Option>>) { - match sidecar_opt { - Some(sidecar) => self.blobs.push_back(sidecar), - None => self.is_sidecars_stream_terminated = true, - } - } - - pub fn add_data_column(&mut self, column_opt: Option>>) { - match column_opt { - Some(column) => self.data_columns.push_back(column), - // TODO(das): this mechanism is dangerous, if somehow there are two requests for the - // same column index it can terminate early. This struct should track that all requests - // for all custody columns terminate. - None => self.custody_columns_streams_terminated += 1, + pub fn add_custody_columns(&mut self, columns: Vec>>) { + for column in columns { + self.data_columns.push_back(column); } + // TODO(das): this mechanism is dangerous, if somehow there are two requests for the + // same column index it can terminate early. This struct should track that all requests + // for all custody columns terminate. 
+ self.custody_columns_streams_terminated += 1; } pub fn into_responses(self, spec: &ChainSpec) -> Result>, String> { if let Some(expects_custody_columns) = self.expects_custody_columns.clone() { self.into_responses_with_custody_columns(expects_custody_columns, spec) } else { - self.into_responses_with_blobs() + self.into_responses_with_blobs(spec) } } - fn into_responses_with_blobs(self) -> Result>, String> { + fn into_responses_with_blobs(self, spec: &ChainSpec) -> Result>, String> { let RangeBlockComponentsRequest { blocks, blobs, .. } = self; // There can't be more more blobs than blocks. i.e. sending any blob (empty @@ -103,7 +89,8 @@ impl RangeBlockComponentsRequest { let mut responses = Vec::with_capacity(blocks.len()); let mut blob_iter = blobs.into_iter().peekable(); for block in blocks.into_iter() { - let mut blob_list = Vec::with_capacity(self.max_blobs_per_block); + let max_blobs_per_block = spec.max_blobs_per_block(block.epoch()) as usize; + let mut blob_list = Vec::with_capacity(max_blobs_per_block); while { let pair_next_blob = blob_iter .peek() @@ -114,7 +101,7 @@ impl RangeBlockComponentsRequest { blob_list.push(blob_iter.next().ok_or("Missing next blob".to_string())?); } - let mut blobs_buffer = vec![None; self.max_blobs_per_block]; + let mut blobs_buffer = vec![None; max_blobs_per_block]; for blob in blob_list { let blob_index = blob.index as usize; let Some(blob_opt) = blobs_buffer.get_mut(blob_index) else { @@ -128,7 +115,7 @@ impl RangeBlockComponentsRequest { } let blobs = RuntimeVariableList::new( blobs_buffer.into_iter().flatten().collect::>(), - self.max_blobs_per_block, + max_blobs_per_block, ) .map_err(|_| "Blobs returned exceeds max length".to_string())?; responses.push(RpcBlock::new(None, block, Some(blobs)).map_err(|e| format!("{e:?}"))?) 
@@ -246,30 +233,25 @@ mod tests { use beacon_chain::test_utils::{ generate_rand_block_and_blobs, generate_rand_block_and_data_columns, test_spec, NumBlobs, }; - use lighthouse_network::PeerId; use rand::SeedableRng; - use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; + use std::sync::Arc; + use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E, SignedBeaconBlock}; #[test] fn no_blobs_into_responses() { let spec = test_spec::(); - let peer_id = PeerId::random(); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec) .0 + .into() }) - .collect::>(); - let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; - let mut info = - RangeBlockComponentsRequest::::new(false, None, None, vec![peer_id], max_len); + .collect::>>>(); + let mut info = RangeBlockComponentsRequest::::new(false, None, None); // Send blocks and complete terminate response - for block in blocks { - info.add_block_response(Some(block.into())); - } - info.add_block_response(None); + info.add_blocks(blocks); // Assert response is finished and RpcBlocks can be constructed assert!(info.is_finished()); @@ -279,7 +261,6 @@ mod tests { #[test] fn empty_blobs_into_responses() { let spec = test_spec::(); - let peer_id = PeerId::random(); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -291,19 +272,15 @@ mod tests { &spec, ) .0 + .into() }) - .collect::>(); - let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; - let mut info = - RangeBlockComponentsRequest::::new(true, None, None, vec![peer_id], max_len); + .collect::>>>(); + let mut info = RangeBlockComponentsRequest::::new(true, None, None); // Send blocks and complete terminate response - for block in blocks { - info.add_block_response(Some(block.into())); - } - info.add_block_response(None); + info.add_blocks(blocks); // Expect no blobs 
returned - info.add_sidecar_response(None); + info.add_blobs(vec![]); // Assert response is finished and RpcBlocks can be constructed, even if blobs weren't returned. // This makes sure we don't expect blobs here when they have expired. Checking this logic should @@ -316,46 +293,35 @@ mod tests { fn rpc_block_with_custody_columns() { let spec = test_spec::(); let expects_custody_columns = vec![1, 2, 3, 4]; - let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { generate_rand_block_and_data_columns::( - ForkName::Deneb, + ForkName::Fulu, NumBlobs::Number(1), &mut rng, &spec, ) }) .collect::>(); - let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; let mut info = RangeBlockComponentsRequest::::new( false, Some(expects_custody_columns.clone()), Some(expects_custody_columns.len()), - vec![PeerId::random()], - max_len, ); // Send blocks and complete terminate response - for block in &blocks { - info.add_block_response(Some(block.0.clone().into())); - } - info.add_block_response(None); + info.add_blocks(blocks.iter().map(|b| b.0.clone().into()).collect()); // Assert response is not finished assert!(!info.is_finished()); - // Send data columns interleaved - for block in &blocks { - for column in &block.1 { - if expects_custody_columns.contains(&column.index) { - info.add_data_column(Some(column.clone())); - } - } - } - - // Terminate the requests - for (i, _column_index) in expects_custody_columns.iter().enumerate() { - info.add_data_column(None); + // Send data columns + for (i, &column_index) in expects_custody_columns.iter().enumerate() { + info.add_custody_columns( + blocks + .iter() + .flat_map(|b| b.1.iter().filter(|d| d.index == column_index).cloned()) + .collect(), + ); if i < expects_custody_columns.len() - 1 { assert!( @@ -377,48 +343,52 @@ mod tests { #[test] fn rpc_block_with_custody_columns_batched() { let spec = test_spec::(); - let expects_custody_columns = vec![1, 2, 3, 4]; - let 
num_of_data_column_requests = 2; + let batched_column_requests = [vec![1_u64, 2], vec![3, 4]]; + let expects_custody_columns = batched_column_requests + .iter() + .flatten() + .cloned() + .collect::>(); + let custody_column_request_ids = + (0..batched_column_requests.len() as u32).collect::>(); + let num_of_data_column_requests = custody_column_request_ids.len(); + + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(num_of_data_column_requests), + ); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { generate_rand_block_and_data_columns::( - ForkName::Deneb, + ForkName::Fulu, NumBlobs::Number(1), &mut rng, &spec, ) }) .collect::>(); - let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; - let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(num_of_data_column_requests), - vec![PeerId::random()], - max_len, - ); + // Send blocks and complete terminate response - for block in &blocks { - info.add_block_response(Some(block.0.clone().into())); - } - info.add_block_response(None); + info.add_blocks(blocks.iter().map(|b| b.0.clone().into()).collect()); // Assert response is not finished assert!(!info.is_finished()); - // Send data columns interleaved - for block in &blocks { - for column in &block.1 { - if expects_custody_columns.contains(&column.index) { - info.add_data_column(Some(column.clone())); - } - } - } + for (i, column_indices) in batched_column_requests.iter().enumerate() { + // Send the set of columns in the same batch request + info.add_custody_columns( + blocks + .iter() + .flat_map(|b| { + b.1.iter() + .filter(|d| column_indices.contains(&d.index)) + .cloned() + }) + .collect::>(), + ); - // Terminate the requests - for i in 0..num_of_data_column_requests { - info.add_data_column(None); if i < num_of_data_column_requests - 1 { assert!( !info.is_finished(), diff --git 
a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 2df8b5f94c..fc31e83727 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -36,7 +36,7 @@ use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart}; use super::block_lookups::BlockLookups; use super::network_context::{ - BlockOrBlob, CustodyByRootResult, RangeRequestId, RpcEvent, SyncNetworkContext, + CustodyByRootResult, RangeBlockComponent, RangeRequestId, RpcEvent, SyncNetworkContext, }; use super::peer_sampling::{Sampling, SamplingConfig, SamplingResult}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; @@ -47,7 +47,6 @@ use crate::status::ToStatusMessage; use crate::sync::block_lookups::{ BlobRequestState, BlockComponent, BlockRequestState, CustodyRequestState, DownloadResult, }; -use crate::sync::block_sidecar_coupling::RangeBlockComponentsRequest; use crate::sync::network_context::PeerGroup; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::validator_monitor::timestamp_now; @@ -57,8 +56,9 @@ use beacon_chain::{ use futures::StreamExt; use lighthouse_network::rpc::RPCError; use lighthouse_network::service::api_types::{ - CustodyRequester, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, SamplingId, - SamplingRequester, SingleLookupReqId, SyncRequestId, + BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, CustodyRequester, + DataColumnsByRangeRequestId, DataColumnsByRootRequestId, DataColumnsByRootRequester, Id, + SamplingId, SamplingRequester, SingleLookupReqId, SyncRequestId, }; use lighthouse_network::types::{NetworkGlobals, SyncState}; use lighthouse_network::SyncInfo; @@ -69,7 +69,9 @@ use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + BlobSidecar, DataColumnSidecar, EthSpec, ForkContext, Hash256, 
SignedBeaconBlock, Slot, +}; #[cfg(test)] use types::ColumnIndex; @@ -258,10 +260,11 @@ pub fn spawn( network_send: mpsc::UnboundedSender>, beacon_processor: Arc>, sync_recv: mpsc::UnboundedReceiver>, + fork_context: Arc, log: slog::Logger, ) { assert!( - beacon_chain.spec.max_request_blocks >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, + beacon_chain.spec.max_request_blocks(fork_context.current_fork()) as u64 >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH, "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request" ); @@ -272,6 +275,7 @@ pub fn spawn( beacon_processor, sync_recv, SamplingConfig::Default, + fork_context, log.clone(), ); @@ -287,6 +291,7 @@ impl SyncManager { beacon_processor: Arc>, sync_recv: mpsc::UnboundedReceiver>, sampling_config: SamplingConfig, + fork_context: Arc, log: slog::Logger, ) -> Self { let network_globals = beacon_processor.network_globals.clone(); @@ -297,6 +302,7 @@ impl SyncManager { network_send, beacon_processor.clone(), beacon_chain.clone(), + fork_context.clone(), log.clone(), ), range_sync: RangeSync::new( @@ -485,36 +491,14 @@ impl SyncManager { SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response(req_id, peer_id, RpcEvent::RPCError(error)) } - SyncRequestId::RangeBlockAndBlobs { id } => { - if let Some(sender_id) = self.network.range_request_failed(id) { - match sender_id { - RangeRequestId::RangeSync { chain_id, batch_id } => { - self.range_sync.inject_error( - &mut self.network, - peer_id, - batch_id, - chain_id, - id, - ); - self.update_sync_state(); - } - RangeRequestId::BackfillSync { batch_id } => match self - .backfill_sync - .inject_error(&mut self.network, batch_id, &peer_id, id) - { - Ok(_) => {} - Err(_) => self.update_sync_state(), - }, - } - } else { - debug!( - self.log, - "RPC error for range request has no associated entry in network context, ungraceful disconnect"; - "peer_id" => %peer_id, - "request_id" => %id, - 
"error" => ?error, - ); - } + SyncRequestId::BlocksByRange(req_id) => { + self.on_blocks_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) + } + SyncRequestId::BlobsByRange(req_id) => { + self.on_blobs_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) + } + SyncRequestId::DataColumnsByRange(req_id) => { + self.on_data_columns_by_range_response(req_id, peer_id, RpcEvent::RPCError(error)) } } } @@ -1045,14 +1029,13 @@ impl SyncManager { SyncRequestId::SingleBlock { id } => self.on_single_block_response( id, peer_id, - match block { - Some(block) => RpcEvent::Response(block, seen_timestamp), - None => RpcEvent::StreamTermination, - }, + RpcEvent::from_chunk(block, seen_timestamp), + ), + SyncRequestId::BlocksByRange(id) => self.on_blocks_by_range_response( + id, + peer_id, + RpcEvent::from_chunk(block, seen_timestamp), ), - SyncRequestId::RangeBlockAndBlobs { id } => { - self.range_block_and_blobs_response(id, peer_id, block.into()) - } _ => { crit!(self.log, "bad request id for block"; "peer_id" => %peer_id ); } @@ -1088,14 +1071,13 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => self.on_single_blob_response( id, peer_id, - match blob { - Some(blob) => RpcEvent::Response(blob, seen_timestamp), - None => RpcEvent::StreamTermination, - }, + RpcEvent::from_chunk(blob, seen_timestamp), + ), + SyncRequestId::BlobsByRange(id) => self.on_blobs_by_range_response( + id, + peer_id, + RpcEvent::from_chunk(blob, seen_timestamp), ), - SyncRequestId::RangeBlockAndBlobs { id } => { - self.range_block_and_blobs_response(id, peer_id, blob.into()) - } _ => { crit!(self.log, "bad request id for blob"; "peer_id" => %peer_id); } @@ -1114,19 +1096,14 @@ impl SyncManager { self.on_data_columns_by_root_response( req_id, peer_id, - match data_column { - Some(data_column) => RpcEvent::Response(data_column, seen_timestamp), - None => RpcEvent::StreamTermination, - }, - ); - } - SyncRequestId::RangeBlockAndBlobs { id } => { - 
self.range_block_and_blobs_response( - id, - peer_id, - BlockOrBlob::CustodyColumns(data_column), + RpcEvent::from_chunk(data_column, seen_timestamp), ); } + SyncRequestId::DataColumnsByRange(id) => self.on_data_columns_by_range_response( + id, + peer_id, + RpcEvent::from_chunk(data_column, seen_timestamp), + ), _ => { crit!(self.log, "bad request id for data_column"; "peer_id" => %peer_id); } @@ -1182,6 +1159,54 @@ impl SyncManager { } } + fn on_blocks_by_range_response( + &mut self, + id: BlocksByRangeRequestId, + peer_id: PeerId, + block: RpcEvent>>, + ) { + if let Some(resp) = self.network.on_blocks_by_range_response(id, peer_id, block) { + self.on_range_components_response( + id.parent_request_id, + peer_id, + RangeBlockComponent::Block(resp), + ); + } + } + + fn on_blobs_by_range_response( + &mut self, + id: BlobsByRangeRequestId, + peer_id: PeerId, + blob: RpcEvent>>, + ) { + if let Some(resp) = self.network.on_blobs_by_range_response(id, peer_id, blob) { + self.on_range_components_response( + id.parent_request_id, + peer_id, + RangeBlockComponent::Blob(resp), + ); + } + } + + fn on_data_columns_by_range_response( + &mut self, + id: DataColumnsByRangeRequestId, + peer_id: PeerId, + data_column: RpcEvent>>, + ) { + if let Some(resp) = self + .network + .on_data_columns_by_range_response(id, peer_id, data_column) + { + self.on_range_components_response( + id.parent_request_id, + peer_id, + RangeBlockComponent::CustodyColumns(resp), + ); + } + } + fn on_custody_by_root_result( &mut self, requester: CustodyRequester, @@ -1224,27 +1249,26 @@ impl SyncManager { /// Handles receiving a response for a range sync request that should have both blocks and /// blobs. 
- fn range_block_and_blobs_response( + fn on_range_components_response( &mut self, - id: Id, + range_request_id: ComponentsByRangeRequestId, peer_id: PeerId, - block_or_blob: BlockOrBlob, + range_block_component: RangeBlockComponent, ) { if let Some(resp) = self .network - .range_block_and_blob_response(id, block_or_blob) + .range_block_component_response(range_request_id, range_block_component) { - let epoch = resp.sender_id.batch_id(); - match resp.responses { + match resp { Ok(blocks) => { - match resp.sender_id { + match range_request_id.requester { RangeRequestId::RangeSync { chain_id, batch_id } => { self.range_sync.blocks_by_range_response( &mut self.network, peer_id, chain_id, batch_id, - id, + range_request_id.id, blocks, ); self.update_sync_state(); @@ -1254,7 +1278,7 @@ impl SyncManager { &mut self.network, batch_id, &peer_id, - id, + range_request_id.id, blocks, ) { Ok(ProcessResult::SyncCompleted) => self.update_sync_state(), @@ -1268,36 +1292,25 @@ impl SyncManager { } } } - Err(e) => { - // Re-insert the request so we can retry - self.network.insert_range_blocks_and_blobs_request( - id, - resp.sender_id, - RangeBlockComponentsRequest::new( - resp.expects_blobs, - resp.expects_custody_columns, - None, - vec![], - self.chain.spec.max_blobs_per_block(epoch) as usize, - ), - ); - // inform range that the request needs to be treated as failed - // With time we will want to downgrade this log - warn!( - self.log, - "Blocks and blobs request for range received invalid data"; - "peer_id" => %peer_id, - "sender_id" => ?resp.sender_id, - "error" => e.clone() - ); - let id = SyncRequestId::RangeBlockAndBlobs { id }; - self.network.report_peer( - peer_id, - PeerAction::MidToleranceError, - "block_blob_faulty_batch", - ); - self.inject_error(peer_id, id, RPCError::InvalidData(e)) - } + Err(_) => match range_request_id.requester { + RangeRequestId::RangeSync { chain_id, batch_id } => { + self.range_sync.inject_error( + &mut self.network, + peer_id, + batch_id, + 
chain_id, + range_request_id.id, + ); + self.update_sync_state(); + } + RangeRequestId::BackfillSync { batch_id } => match self + .backfill_sync + .inject_error(&mut self.network, batch_id, &peer_id, range_request_id.id) + { + Ok(_) => {} + Err(_) => self.update_sync_state(), + }, + }, } } } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index f899936128..b03a446add 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -5,7 +5,7 @@ use self::custody::{ActiveCustodyRequest, Error as CustodyRequestError}; pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlockRequest}; use super::block_sidecar_coupling::RangeBlockComponentsRequest; use super::manager::BlockProcessType; -use super::range_sync::{BatchId, ByRangeRequestType, ChainId}; +use super::range_sync::ByRangeRequestType; use super::SyncMessage; use crate::metrics; use crate::network_beacon_processor::NetworkBeaconProcessor; @@ -17,13 +17,12 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; -use lighthouse_network::rpc::methods::{ - BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, - OldBlocksByRangeRequestV1, OldBlocksByRangeRequestV2, -}; +use lighthouse_network::rpc::methods::{BlobsByRangeRequest, DataColumnsByRangeRequest}; use lighthouse_network::rpc::{BlocksByRangeRequest, GoodbyeReason, RPCError, RequestType}; +pub use lighthouse_network::service::api_types::RangeRequestId; use lighthouse_network::service::api_types::{ - AppRequestId, CustodyId, CustodyRequester, DataColumnsByRootRequestId, + AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, ComponentsByRangeRequestId, + CustodyId, CustodyRequester, DataColumnsByRangeRequestId, DataColumnsByRootRequestId, 
DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, }; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; @@ -32,51 +31,25 @@ use rand::prelude::IteratorRandom; use rand::thread_rng; pub use requests::LookupVerifyError; use requests::{ - ActiveRequests, BlobsByRootRequestItems, BlocksByRootRequestItems, - DataColumnsByRootRequestItems, + ActiveRequests, BlobsByRangeRequestItems, BlobsByRootRequestItems, BlocksByRangeRequestItems, + BlocksByRootRequestItems, DataColumnsByRangeRequestItems, DataColumnsByRootRequestItems, }; use slog::{debug, error, warn}; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::fmt::Debug; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - BlobSidecar, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256, - SignedBeaconBlock, Slot, + BlobSidecar, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, ForkContext, + Hash256, SignedBeaconBlock, Slot, }; pub mod custody; mod requests; -pub struct BlocksAndBlobsByRangeResponse { - pub sender_id: RangeRequestId, - pub responses: Result>, String>, - pub expects_blobs: bool, - pub expects_custody_columns: Option>, -} - -#[derive(Debug, Clone, Copy)] -pub enum RangeRequestId { - RangeSync { - chain_id: ChainId, - batch_id: BatchId, - }, - BackfillSync { - batch_id: BatchId, - }, -} - -impl RangeRequestId { - pub fn batch_id(&self) -> BatchId { - match self { - RangeRequestId::RangeSync { batch_id, .. } => *batch_id, - RangeRequestId::BackfillSync { batch_id, .. 
} => *batch_id, - } - } -} - #[derive(Debug)] pub enum RpcEvent { StreamTermination, @@ -84,6 +57,15 @@ pub enum RpcEvent { RPCError(RPCError), } +impl RpcEvent { + pub fn from_chunk(chunk: Option, seen_timestamp: Duration) -> Self { + match chunk { + Some(item) => RpcEvent::Response(item, seen_timestamp), + None => RpcEvent::StreamTermination, + } + } +} + pub type RpcResponseResult = Result<(T, Duration), RpcResponseError>; pub type CustodyByRootResult = Result<(DataColumnSidecarList, PeerGroup), RpcResponseError>; @@ -93,6 +75,7 @@ pub enum RpcResponseError { RpcError(RPCError), VerifyError(LookupVerifyError), CustodyRequestError(CustodyRequestError), + BlockComponentCouplingError(String), } #[derive(Debug, PartialEq, Eq)] @@ -110,16 +93,6 @@ pub enum SendErrorProcessor { ProcessorNotAvailable, } -impl std::fmt::Display for RpcResponseError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - RpcResponseError::RpcError(e) => write!(f, "RPC Error: {:?}", e), - RpcResponseError::VerifyError(e) => write!(f, "Lookup Verify Error: {:?}", e), - RpcResponseError::CustodyRequestError(e) => write!(f, "Custody Request Error: {:?}", e), - } - } -} - impl From for RpcResponseError { fn from(e: RPCError) -> Self { RpcResponseError::RpcError(e) @@ -199,13 +172,22 @@ pub struct SyncNetworkContext { /// A mapping of active DataColumnsByRoot requests data_columns_by_root_requests: ActiveRequests>, + /// A mapping of active BlocksByRange requests + blocks_by_range_requests: + ActiveRequests>, + /// A mapping of active BlobsByRange requests + blobs_by_range_requests: + ActiveRequests>, + /// A mapping of active DataColumnsByRange requests + data_columns_by_range_requests: + ActiveRequests>, /// Mapping of active custody column requests for a block root custody_by_root_requests: FnvHashMap>, - /// BlocksByRange requests paired with BlobsByRange - range_block_components_requests: - FnvHashMap)>, + /// BlocksByRange requests paired with other ByRange 
requests for data components + components_by_range_requests: + FnvHashMap>, /// Whether the ee is online. If it's not, we don't allow access to the /// `beacon_processor_send`. @@ -216,27 +198,17 @@ pub struct SyncNetworkContext { pub chain: Arc>, + fork_context: Arc, + /// Logger for the `SyncNetworkContext`. pub log: slog::Logger, } /// Small enumeration to make dealing with block and blob requests easier. -pub enum BlockOrBlob { - Block(Option>>), - Blob(Option>>), - CustodyColumns(Option>>), -} - -impl From>>> for BlockOrBlob { - fn from(block: Option>>) -> Self { - BlockOrBlob::Block(block) - } -} - -impl From>>> for BlockOrBlob { - fn from(blob: Option>>) -> Self { - BlockOrBlob::Blob(blob) - } +pub enum RangeBlockComponent { + Block(RpcResponseResult>>>), + Blob(RpcResponseResult>>>), + CustodyColumns(RpcResponseResult>>>), } impl SyncNetworkContext { @@ -244,6 +216,7 @@ impl SyncNetworkContext { network_send: mpsc::UnboundedSender>, network_beacon_processor: Arc>, chain: Arc>, + fork_context: Arc, log: slog::Logger, ) -> Self { SyncNetworkContext { @@ -253,10 +226,14 @@ impl SyncNetworkContext { blocks_by_root_requests: ActiveRequests::new("blocks_by_root"), blobs_by_root_requests: ActiveRequests::new("blobs_by_root"), data_columns_by_root_requests: ActiveRequests::new("data_columns_by_root"), + blocks_by_range_requests: ActiveRequests::new("blocks_by_range"), + blobs_by_range_requests: ActiveRequests::new("blobs_by_range"), + data_columns_by_range_requests: ActiveRequests::new("data_columns_by_range"), custody_by_root_requests: <_>::default(), - range_block_components_requests: FnvHashMap::default(), + components_by_range_requests: FnvHashMap::default(), network_beacon_processor, chain, + fork_context, log, } } @@ -268,37 +245,60 @@ impl SyncNetworkContext { /// Returns the ids of all the requests made to the given peer_id. 
pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Vec { - let failed_range_ids = - self.range_block_components_requests - .iter() - .filter_map(|(id, request)| { - if request.1.peer_ids.contains(peer_id) { - Some(SyncRequestId::RangeBlockAndBlobs { id: *id }) - } else { - None - } - }); + // Note: using destructuring pattern without a default case to make sure we don't forget to + // add new request types to this function. Otherwise, lookup sync can break and lookups + // will get stuck if a peer disconnects during an active requests. + let Self { + network_send: _, + request_id: _, + blocks_by_root_requests, + blobs_by_root_requests, + data_columns_by_root_requests, + blocks_by_range_requests, + blobs_by_range_requests, + data_columns_by_range_requests, + // custody_by_root_requests is a meta request of data_columns_by_root_requests + custody_by_root_requests: _, + // components_by_range_requests is a meta request of various _by_range requests + components_by_range_requests: _, + execution_engine_state: _, + network_beacon_processor: _, + chain: _, + fork_context: _, + log: _, + } = self; - let failed_block_ids = self - .blocks_by_root_requests + let blocks_by_root_ids = blocks_by_root_requests .active_requests_of_peer(peer_id) .into_iter() .map(|id| SyncRequestId::SingleBlock { id: *id }); - let failed_blob_ids = self - .blobs_by_root_requests + let blobs_by_root_ids = blobs_by_root_requests .active_requests_of_peer(peer_id) .into_iter() .map(|id| SyncRequestId::SingleBlob { id: *id }); - let failed_data_column_by_root_ids = self - .data_columns_by_root_requests + let data_column_by_root_ids = data_columns_by_root_requests .active_requests_of_peer(peer_id) .into_iter() .map(|req_id| SyncRequestId::DataColumnsByRoot(*req_id)); + let blocks_by_range_ids = blocks_by_range_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::BlocksByRange(*req_id)); + let blobs_by_range_ids = blobs_by_range_requests + 
.active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::BlobsByRange(*req_id)); + let data_column_by_range_ids = data_columns_by_range_requests + .active_requests_of_peer(peer_id) + .into_iter() + .map(|req_id| SyncRequestId::DataColumnsByRange(*req_id)); - failed_range_ids - .chain(failed_block_ids) - .chain(failed_blob_ids) - .chain(failed_data_column_by_root_ids) + blocks_by_root_ids + .chain(blobs_by_root_ids) + .chain(data_column_by_root_ids) + .chain(blocks_by_range_ids) + .chain(blobs_by_range_ids) + .chain(data_column_by_range_ids) .collect() } @@ -357,116 +357,62 @@ impl SyncNetworkContext { peer_id: PeerId, batch_type: ByRangeRequestType, request: BlocksByRangeRequest, - sender_id: RangeRequestId, + requester: RangeRequestId, ) -> Result { - let epoch = Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()); - let id = self.next_id(); - let mut requested_peers = vec![peer_id]; - debug!( - self.log, - "Sending BlocksByRange request"; - "method" => "BlocksByRange", - "count" => request.count(), - "epoch" => epoch, - "peer" => %peer_id, - ); - let rpc_request = match request { - BlocksByRangeRequest::V1(ref req) => { - RequestType::BlocksByRange(OldBlocksByRangeRequest::V1(OldBlocksByRangeRequestV1 { - start_slot: req.start_slot, - count: req.count, - step: 1, - })) - } - BlocksByRangeRequest::V2(ref req) => { - RequestType::BlocksByRange(OldBlocksByRangeRequest::V2(OldBlocksByRangeRequestV2 { - start_slot: req.start_slot, - count: req.count, - step: 1, - })) - } + // Create the overall components_by_range request ID before its individual components + let id = ComponentsByRangeRequestId { + id: self.next_id(), + requester, }; - self.network_send - .send(NetworkMessage::SendRequest { + + let _blocks_req_id = self.send_blocks_by_range_request(peer_id, request.clone(), id)?; + + let blobs_req_id = if matches!(batch_type, ByRangeRequestType::BlocksAndBlobs) { + Some(self.send_blobs_by_range_request( peer_id, - request: 
rpc_request, - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - }) - .map_err(|_| RpcRequestSendError::NetworkSendError)?; - - let expected_blobs = if matches!(batch_type, ByRangeRequestType::BlocksAndBlobs) { - debug!( - self.log, - "Sending BlobsByRange requests"; - "method" => "BlobsByRange", - "count" => request.count(), - "epoch" => epoch, - "peer" => %peer_id, - ); - - // Create the blob request based on the blocks request. - self.network_send - .send(NetworkMessage::SendRequest { - peer_id, - request: RequestType::BlobsByRange(BlobsByRangeRequest { - start_slot: *request.start_slot(), - count: *request.count(), - }), - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - }) - .map_err(|_| RpcRequestSendError::NetworkSendError)?; - true + BlobsByRangeRequest { + start_slot: *request.start_slot(), + count: *request.count(), + }, + id, + )?) } else { - false + None }; - let (expects_columns, num_of_column_req) = + let (expects_columns, data_column_requests) = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { let column_indexes = self.network_globals().sampling_columns.clone(); - let mut num_of_custody_column_req = 0; - for (peer_id, columns_by_range_request) in - self.make_columns_by_range_requests(request, &column_indexes)? - { - requested_peers.push(peer_id); - - debug!( - self.log, - "Sending DataColumnsByRange requests"; - "method" => "DataColumnsByRange", - "count" => columns_by_range_request.count, - "epoch" => epoch, - "columns" => ?columns_by_range_request.columns, - "peer" => %peer_id, - ); - - self.send_network_msg(NetworkMessage::SendRequest { - peer_id, - request: RequestType::DataColumnsByRange(columns_by_range_request), - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + let data_column_requests = self + .make_columns_by_range_requests(request, &column_indexes)? 
+ .into_iter() + .map(|(peer_id, columns_by_range_request)| { + self.send_data_columns_by_range_request( + peer_id, + columns_by_range_request, + id, + ) }) - .map_err(|_| RpcRequestSendError::NetworkSendError)?; + .collect::, _>>()?; - num_of_custody_column_req += 1; - } - - (Some(column_indexes), Some(num_of_custody_column_req)) + ( + Some(column_indexes.into_iter().collect::>()), + Some(data_column_requests), + ) } else { (None, None) }; - // TODO(pawan): this would break if a batch contains multiple epochs - let max_blobs_len = self.chain.spec.max_blobs_per_block(epoch); + let expected_blobs = blobs_req_id.is_some(); let info = RangeBlockComponentsRequest::new( expected_blobs, - expects_columns.map(|c| c.into_iter().collect()), - num_of_column_req, - requested_peers, - max_blobs_len as usize, + expects_columns, + data_column_requests.map(|items| items.len()), ); - self.range_block_components_requests - .insert(id, (sender_id, info)); - Ok(id) + self.components_by_range_requests.insert(id, info); + + Ok(id.id) } fn make_columns_by_range_requests( @@ -503,54 +449,43 @@ impl SyncNetworkContext { Ok(peer_id_to_request_map) } - pub fn range_request_failed(&mut self, request_id: Id) -> Option { - let sender_id = self - .range_block_components_requests - .remove(&request_id) - .map(|(sender_id, _info)| sender_id); - if let Some(sender_id) = sender_id { - debug!( - self.log, - "Sync range request failed"; - "request_id" => request_id, - "sender_id" => ?sender_id - ); - Some(sender_id) - } else { - debug!(self.log, "Sync range request failed"; "request_id" => request_id); - None - } - } - /// Received a blocks by range or blobs by range response for a request that couples blocks ' /// and blobs. 
- pub fn range_block_and_blob_response( + pub fn range_block_component_response( &mut self, - request_id: Id, - block_or_blob: BlockOrBlob, - ) -> Option> { - let Entry::Occupied(mut entry) = self.range_block_components_requests.entry(request_id) - else { + id: ComponentsByRangeRequestId, + range_block_component: RangeBlockComponent, + ) -> Option>, RpcResponseError>> { + let Entry::Occupied(mut entry) = self.components_by_range_requests.entry(id) else { metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]); return None; }; - let (_, info) = entry.get_mut(); - match block_or_blob { - BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block), - BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), - BlockOrBlob::CustodyColumns(column) => info.add_data_column(column), + if let Err(e) = { + let request = entry.get_mut(); + match range_block_component { + RangeBlockComponent::Block(resp) => resp.map(|(blocks, _)| { + request.add_blocks(blocks); + }), + RangeBlockComponent::Blob(resp) => resp.map(|(blobs, _)| { + request.add_blobs(blobs); + }), + RangeBlockComponent::CustodyColumns(resp) => resp.map(|(custody_columns, _)| { + request.add_custody_columns(custody_columns); + }), + } + } { + entry.remove(); + return Some(Err(e)); } - if info.is_finished() { + + if entry.get_mut().is_finished() { // If the request is finished, dequeue everything - let (sender_id, info) = entry.remove(); - let (expects_blobs, expects_custody_columns) = info.get_requirements(); - Some(BlocksAndBlobsByRangeResponse { - sender_id, - responses: info.into_responses(&self.chain.spec), - expects_blobs, - expects_custody_columns, - }) + let request = entry.remove(); + let blocks = request + .into_responses(&self.chain.spec) + .map_err(RpcResponseError::BlockComponentCouplingError); + Some(blocks) } else { None } @@ -601,17 +536,10 @@ impl SyncNetworkContext { } } - let req_id = self.next_id(); - let id = SingleLookupReqId { 
lookup_id, req_id }; - - debug!( - self.log, - "Sending BlocksByRoot Request"; - "method" => "BlocksByRoot", - "block_root" => ?block_root, - "peer" => %peer_id, - "id" => ?id - ); + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; let request = BlocksByRootSingleRequest(block_root); @@ -624,11 +552,20 @@ impl SyncNetworkContext { self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: RequestType::BlocksByRoot(request.into_request(&self.chain.spec)), + request: RequestType::BlocksByRoot(request.into_request(&self.fork_context)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; + debug!( + self.log, + "Sync RPC request sent"; + "method" => "BlocksByRoot", + "block_root" => ?block_root, + "peer" => %peer_id, + "id" => %id + ); + self.blocks_by_root_requests.insert( id, peer_id, @@ -638,7 +575,7 @@ impl SyncNetworkContext { BlocksByRootRequestItems::new(request), ); - Ok(LookupRequestResult::RequestSent(req_id)) + Ok(LookupRequestResult::RequestSent(id.req_id)) } /// Request necessary blobs for `block_root`. 
Requests only the necessary blobs by checking: @@ -684,33 +621,35 @@ impl SyncNetworkContext { return Ok(LookupRequestResult::NoRequestNeeded("no indices to fetch")); } - let req_id = self.next_id(); - let id = SingleLookupReqId { lookup_id, req_id }; - - debug!( - self.log, - "Sending BlobsByRoot Request"; - "method" => "BlobsByRoot", - "block_root" => ?block_root, - "blob_indices" => ?indices, - "peer" => %peer_id, - "id" => ?id - ); + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; let request = BlobsByRootSingleBlockRequest { block_root, - indices, + indices: indices.clone(), }; // Lookup sync event safety: Refer to `Self::block_lookup_request` `network_send.send` call self.network_send .send(NetworkMessage::SendRequest { peer_id, - request: RequestType::BlobsByRoot(request.clone().into_request(&self.chain.spec)), + request: RequestType::BlobsByRoot(request.clone().into_request(&self.fork_context)), request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; + debug!( + self.log, + "Sync RPC request sent"; + "method" => "BlobsByRoot", + "block_root" => ?block_root, + "blob_indices" => ?indices, + "peer" => %peer_id, + "id" => %id + ); + self.blobs_by_root_requests.insert( id, peer_id, @@ -721,7 +660,7 @@ impl SyncNetworkContext { BlobsByRootRequestItems::new(request), ); - Ok(LookupRequestResult::RequestSent(req_id)) + Ok(LookupRequestResult::RequestSent(id.req_id)) } /// Request to send a single `data_columns_by_root` request to the network. 
@@ -732,35 +671,35 @@ impl SyncNetworkContext { request: DataColumnsByRootSingleBlockRequest, expect_max_responses: bool, ) -> Result, &'static str> { - let req_id = DataColumnsByRootRequestId { + let id = DataColumnsByRootRequestId { id: self.next_id(), requester, }; - debug!( - self.log, - "Sending DataColumnsByRoot Request"; - "method" => "DataColumnsByRoot", - "block_root" => ?request.block_root, - "indices" => ?request.indices, - "peer" => %peer_id, - "requester" => ?requester, - "req_id" => %req_id, - ); self.send_network_msg(NetworkMessage::SendRequest { peer_id, request: RequestType::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(req_id)), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(id)), })?; + debug!( + self.log, + "Sync RPC request sent"; + "method" => "DataColumnsByRoot", + "block_root" => ?request.block_root, + "indices" => ?request.indices, + "peer" => %peer_id, + "id" => %id, + ); + self.data_columns_by_root_requests.insert( - req_id, + id, peer_id, expect_max_responses, DataColumnsByRootRequestItems::new(request), ); - Ok(LookupRequestResult::RequestSent(req_id)) + Ok(LookupRequestResult::RequestSent(id)) } /// Request to fetch all needed custody columns of a specific block. This function may not send @@ -793,15 +732,17 @@ impl SyncNetworkContext { return Ok(LookupRequestResult::NoRequestNeeded("no indices to fetch")); } - let req_id = self.next_id(); - let id = SingleLookupReqId { lookup_id, req_id }; + let id = SingleLookupReqId { + lookup_id, + req_id: self.next_id(), + }; debug!( self.log, "Starting custody columns request"; "block_root" => ?block_root, "indices" => ?custody_indexes_to_fetch, - "id" => ?id + "id" => %id ); let requester = CustodyRequester(id); @@ -820,12 +761,134 @@ impl SyncNetworkContext { // created cannot return data immediately, it must send some request to the network // first. 
And there must exist some request, `custody_indexes_to_fetch` is not empty. self.custody_by_root_requests.insert(requester, request); - Ok(LookupRequestResult::RequestSent(req_id)) + Ok(LookupRequestResult::RequestSent(id.req_id)) } Err(e) => Err(RpcRequestSendError::CustodyRequestError(e)), } } + fn send_blocks_by_range_request( + &mut self, + peer_id: PeerId, + request: BlocksByRangeRequest, + parent_request_id: ComponentsByRangeRequestId, + ) -> Result { + let id = BlocksByRangeRequestId { + id: self.next_id(), + parent_request_id, + }; + self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlocksByRange(request.clone().into()), + request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; + + debug!( + self.log, + "Sync RPC request sent"; + "method" => "BlocksByRange", + "slots" => request.count(), + "epoch" => Slot::new(*request.start_slot()).epoch(T::EthSpec::slots_per_epoch()), + "peer" => %peer_id, + "id" => %id, + ); + + self.blocks_by_range_requests.insert( + id, + peer_id, + // false = do not enforce max_requests are returned for *_by_range methods. We don't + // know if there are missed blocks. + false, + BlocksByRangeRequestItems::new(request), + ); + Ok(id) + } + + fn send_blobs_by_range_request( + &mut self, + peer_id: PeerId, + request: BlobsByRangeRequest, + parent_request_id: ComponentsByRangeRequestId, + ) -> Result { + let id = BlobsByRangeRequestId { + id: self.next_id(), + parent_request_id, + }; + let request_epoch = Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()); + + // Create the blob request based on the blocks request. 
+ self.network_send + .send(NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlobsByRange(request.clone()), + request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; + + debug!( + self.log, + "Sync RPC request sent"; + "method" => "BlobsByRange", + "slots" => request.count, + "epoch" => request_epoch, + "peer" => %peer_id, + "id" => %id, + ); + + let max_blobs_per_block = self.chain.spec.max_blobs_per_block(request_epoch); + self.blobs_by_range_requests.insert( + id, + peer_id, + // false = do not enforce max_requests are returned for *_by_range methods. We don't + // know if there are missed blocks. + false, + BlobsByRangeRequestItems::new(request, max_blobs_per_block), + ); + Ok(id) + } + + fn send_data_columns_by_range_request( + &mut self, + peer_id: PeerId, + request: DataColumnsByRangeRequest, + parent_request_id: ComponentsByRangeRequestId, + ) -> Result { + let id = DataColumnsByRangeRequestId { + id: self.next_id(), + parent_request_id, + }; + + self.send_network_msg(NetworkMessage::SendRequest { + peer_id, + request: RequestType::DataColumnsByRange(request.clone()), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), + }) + .map_err(|_| RpcRequestSendError::NetworkSendError)?; + + debug!( + self.log, + "Sync RPC request sent"; + "method" => "DataColumnsByRange", + "slots" => request.count, + "epoch" => Slot::new(request.start_slot).epoch(T::EthSpec::slots_per_epoch()), + "columns" => ?request.columns, + "peer" => %peer_id, + "id" => %id, + ); + + self.data_columns_by_range_requests.insert( + id, + peer_id, + // false = do not enforce max_requests are returned for *_by_range methods. We don't + // know if there are missed blocks. 
+ false, + DataColumnsByRangeRequestItems::new(request), + ); + Ok(id) + } + pub fn is_execution_engine_online(&self) -> bool { self.execution_engine_state == EngineState::Online } @@ -924,16 +987,6 @@ impl SyncNetworkContext { } } - pub fn insert_range_blocks_and_blobs_request( - &mut self, - id: Id, - sender_id: RangeRequestId, - info: RangeBlockComponentsRequest, - ) { - self.range_block_components_requests - .insert(id, (sender_id, info)); - } - /// Attempt to make progress on all custody_by_root requests. Some request may be stale waiting /// for custody peers. Returns a Vec of results as zero or more requests may fail in this /// attempt. @@ -968,8 +1021,8 @@ impl SyncNetworkContext { peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>>> { - let response = self.blocks_by_root_requests.on_response(id, rpc_event); - let response = response.map(|res| { + let resp = self.blocks_by_root_requests.on_response(id, rpc_event); + let resp = resp.map(|res| { res.and_then(|(mut blocks, seen_timestamp)| { // Enforce that exactly one chunk = one block is returned. ReqResp behavior limits the // response count to at most 1. 
@@ -981,10 +1034,7 @@ impl SyncNetworkContext { } }) }); - if let Some(Err(RpcResponseError::VerifyError(e))) = &response { - self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - response + self.on_rpc_response_result(id, "BlocksByRoot", resp, peer_id, |_| 1) } pub(crate) fn on_single_blob_response( @@ -993,8 +1043,8 @@ impl SyncNetworkContext { peer_id: PeerId, rpc_event: RpcEvent>>, ) -> Option>> { - let response = self.blobs_by_root_requests.on_response(id, rpc_event); - let response = response.map(|res| { + let resp = self.blobs_by_root_requests.on_response(id, rpc_event); + let resp = resp.map(|res| { res.and_then(|(blobs, seen_timestamp)| { if let Some(max_len) = blobs .first() @@ -1013,10 +1063,7 @@ impl SyncNetworkContext { } }) }); - if let Some(Err(RpcResponseError::VerifyError(e))) = &response { - self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); - } - response + self.on_rpc_response_result(id, "BlobsByRoot", resp, peer_id, |_| 1) } #[allow(clippy::type_complexity)] @@ -1029,14 +1076,73 @@ impl SyncNetworkContext { let resp = self .data_columns_by_root_requests .on_response(id, rpc_event); - self.report_rpc_response_errors(resp, peer_id) + self.on_rpc_response_result(id, "DataColumnsByRoot", resp, peer_id, |_| 1) } - fn report_rpc_response_errors( + #[allow(clippy::type_complexity)] + pub(crate) fn on_blocks_by_range_response( &mut self, + id: BlocksByRangeRequestId, + peer_id: PeerId, + rpc_event: RpcEvent>>, + ) -> Option>>>> { + let resp = self.blocks_by_range_requests.on_response(id, rpc_event); + self.on_rpc_response_result(id, "BlocksByRange", resp, peer_id, |b| b.len()) + } + + #[allow(clippy::type_complexity)] + pub(crate) fn on_blobs_by_range_response( + &mut self, + id: BlobsByRangeRequestId, + peer_id: PeerId, + rpc_event: RpcEvent>>, + ) -> Option>>>> { + let resp = self.blobs_by_range_requests.on_response(id, rpc_event); + self.on_rpc_response_result(id, "BlobsByRangeRequest", resp, peer_id, |b| 
b.len()) + } + + #[allow(clippy::type_complexity)] + pub(crate) fn on_data_columns_by_range_response( + &mut self, + id: DataColumnsByRangeRequestId, + peer_id: PeerId, + rpc_event: RpcEvent>>, + ) -> Option>> { + let resp = self + .data_columns_by_range_requests + .on_response(id, rpc_event); + self.on_rpc_response_result(id, "DataColumnsByRange", resp, peer_id, |d| d.len()) + } + + fn on_rpc_response_result usize>( + &mut self, + id: I, + method: &'static str, resp: Option>, peer_id: PeerId, + get_count: F, ) -> Option> { + match &resp { + None => {} + Some(Ok((v, _))) => { + debug!( + self.log, + "Sync RPC request completed"; + "id" => %id, + "method" => method, + "count" => get_count(v) + ); + } + Some(Err(e)) => { + debug!( + self.log, + "Sync RPC request error"; + "id" => %id, + "method" => method, + "error" => ?e + ); + } + } if let Some(Err(RpcResponseError::VerifyError(e))) = &resp { self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); } @@ -1186,21 +1292,27 @@ impl SyncNetworkContext { } pub(crate) fn register_metrics(&self) { - metrics::set_gauge_vec( - &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, - &["blocks_by_root"], - self.blocks_by_root_requests.len() as i64, - ); - metrics::set_gauge_vec( - &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, - &["blobs_by_root"], - self.blobs_by_root_requests.len() as i64, - ); - metrics::set_gauge_vec( - &metrics::SYNC_ACTIVE_NETWORK_REQUESTS, - &["range_blocks"], - self.range_block_components_requests.len() as i64, - ); + for (id, count) in [ + ("blocks_by_root", self.blocks_by_root_requests.len()), + ("blobs_by_root", self.blobs_by_root_requests.len()), + ( + "data_columns_by_root", + self.data_columns_by_root_requests.len(), + ), + ("blocks_by_range", self.blocks_by_range_requests.len()), + ("blobs_by_range", self.blobs_by_range_requests.len()), + ( + "data_columns_by_range", + self.data_columns_by_range_requests.len(), + ), + ("custody_by_root", self.custody_by_root_requests.len()), + ( + "components_by_range", 
+ self.components_by_range_requests.len(), + ), + ] { + metrics::set_gauge_vec(&metrics::SYNC_ACTIVE_NETWORK_REQUESTS, &[id], count as i64); + } } } diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index 4a5a16459d..c9b85e47b6 100644 --- a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -4,10 +4,13 @@ use beacon_chain::validator_monitor::timestamp_now; use fnv::FnvHashMap; use lighthouse_network::PeerId; use strum::IntoStaticStr; -use types::Hash256; +use types::{Hash256, Slot}; +pub use blobs_by_range::BlobsByRangeRequestItems; pub use blobs_by_root::{BlobsByRootRequestItems, BlobsByRootSingleBlockRequest}; +pub use blocks_by_range::BlocksByRangeRequestItems; pub use blocks_by_root::{BlocksByRootRequestItems, BlocksByRootSingleRequest}; +pub use data_columns_by_range::DataColumnsByRangeRequestItems; pub use data_columns_by_root::{ DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest, }; @@ -16,8 +19,11 @@ use crate::metrics; use super::{RpcEvent, RpcResponseResult}; +mod blobs_by_range; mod blobs_by_root; +mod blocks_by_range; mod blocks_by_root; +mod data_columns_by_range; mod data_columns_by_root; #[derive(Debug, PartialEq, Eq, IntoStaticStr)] @@ -26,8 +32,9 @@ pub enum LookupVerifyError { TooManyResponses, UnrequestedBlockRoot(Hash256), UnrequestedIndex(u64), + UnrequestedSlot(Slot), InvalidInclusionProof, - DuplicateData, + DuplicatedData(Slot, u64), InternalError(String), } diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_range.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_range.rs new file mode 100644 index 0000000000..9c6f516199 --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_range.rs @@ -0,0 +1,56 @@ +use super::{ActiveRequestItems, LookupVerifyError}; +use 
lighthouse_network::rpc::methods::BlobsByRangeRequest; +use std::sync::Arc; +use types::{BlobSidecar, EthSpec}; + +/// Accumulates results of a blobs_by_range request. Only returns items after receiving the +/// stream termination. +pub struct BlobsByRangeRequestItems { + request: BlobsByRangeRequest, + items: Vec>>, + max_blobs_per_block: u64, +} + +impl BlobsByRangeRequestItems { + pub fn new(request: BlobsByRangeRequest, max_blobs_per_block: u64) -> Self { + Self { + request, + items: vec![], + max_blobs_per_block, + } + } +} + +impl ActiveRequestItems for BlobsByRangeRequestItems { + type Item = Arc>; + + fn add(&mut self, blob: Self::Item) -> Result { + if blob.slot() < self.request.start_slot + || blob.slot() >= self.request.start_slot + self.request.count + { + return Err(LookupVerifyError::UnrequestedSlot(blob.slot())); + } + if blob.index >= self.max_blobs_per_block { + return Err(LookupVerifyError::UnrequestedIndex(blob.index)); + } + if !blob.verify_blob_sidecar_inclusion_proof() { + return Err(LookupVerifyError::InvalidInclusionProof); + } + if self + .items + .iter() + .any(|existing| existing.slot() == blob.slot() && existing.index == blob.index) + { + return Err(LookupVerifyError::DuplicatedData(blob.slot(), blob.index)); + } + + self.items.push(blob); + + // Skip check if blobs are ready as it's rare that all blocks have max blobs + Ok(false) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs index fefb27a5ef..547c51198e 100644 --- a/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blobs_by_root.rs @@ -1,6 +1,6 @@ use lighthouse_network::rpc::methods::BlobsByRootRequest; use std::sync::Arc; -use types::{blob_sidecar::BlobIdentifier, BlobSidecar, ChainSpec, EthSpec, Hash256}; +use 
types::{blob_sidecar::BlobIdentifier, BlobSidecar, EthSpec, ForkContext, Hash256}; use super::{ActiveRequestItems, LookupVerifyError}; @@ -11,7 +11,7 @@ pub struct BlobsByRootSingleBlockRequest { } impl BlobsByRootSingleBlockRequest { - pub fn into_request(self, spec: &ChainSpec) -> BlobsByRootRequest { + pub fn into_request(self, spec: &ForkContext) -> BlobsByRootRequest { BlobsByRootRequest::new( self.indices .into_iter() @@ -57,7 +57,7 @@ impl ActiveRequestItems for BlobsByRootRequestItems { return Err(LookupVerifyError::UnrequestedIndex(blob.index)); } if self.items.iter().any(|b| b.index == blob.index) { - return Err(LookupVerifyError::DuplicateData); + return Err(LookupVerifyError::DuplicatedData(blob.slot(), blob.index)); } self.items.push(blob); diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_range.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_range.rs new file mode 100644 index 0000000000..c7d2dda01e --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_range.rs @@ -0,0 +1,48 @@ +use super::{ActiveRequestItems, LookupVerifyError}; +use lighthouse_network::rpc::BlocksByRangeRequest; +use std::sync::Arc; +use types::{EthSpec, SignedBeaconBlock}; + +/// Accumulates results of a blocks_by_range request. Only returns items after receiving the +/// stream termination. 
+pub struct BlocksByRangeRequestItems { + request: BlocksByRangeRequest, + items: Vec>>, +} + +impl BlocksByRangeRequestItems { + pub fn new(request: BlocksByRangeRequest) -> Self { + Self { + request, + items: vec![], + } + } +} + +impl ActiveRequestItems for BlocksByRangeRequestItems { + type Item = Arc>; + + fn add(&mut self, block: Self::Item) -> Result { + if block.slot().as_u64() < *self.request.start_slot() + || block.slot().as_u64() >= self.request.start_slot() + self.request.count() + { + return Err(LookupVerifyError::UnrequestedSlot(block.slot())); + } + if self + .items + .iter() + .any(|existing| existing.slot() == block.slot()) + { + // DuplicatedData is a common error for all components, default index to 0 + return Err(LookupVerifyError::DuplicatedData(block.slot(), 0)); + } + + self.items.push(block); + + Ok(self.items.len() >= *self.request.count() as usize) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs index f3cdcbe714..6d7eabf909 100644 --- a/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/blocks_by_root.rs @@ -1,7 +1,7 @@ use beacon_chain::get_block_root; use lighthouse_network::rpc::BlocksByRootRequest; use std::sync::Arc; -use types::{ChainSpec, EthSpec, Hash256, SignedBeaconBlock}; +use types::{EthSpec, ForkContext, Hash256, SignedBeaconBlock}; use super::{ActiveRequestItems, LookupVerifyError}; @@ -9,8 +9,8 @@ use super::{ActiveRequestItems, LookupVerifyError}; pub struct BlocksByRootSingleRequest(pub Hash256); impl BlocksByRootSingleRequest { - pub fn into_request(self, spec: &ChainSpec) -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![self.0], spec) + pub fn into_request(self, fork_context: &ForkContext) -> BlocksByRootRequest { + 
BlocksByRootRequest::new(vec![self.0], fork_context) } } diff --git a/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs new file mode 100644 index 0000000000..9dabb2defa --- /dev/null +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_range.rs @@ -0,0 +1,54 @@ +use super::{ActiveRequestItems, LookupVerifyError}; +use lighthouse_network::rpc::methods::DataColumnsByRangeRequest; +use std::sync::Arc; +use types::{DataColumnSidecar, EthSpec}; + +/// Accumulates results of a data_columns_by_range request. Only returns items after receiving the +/// stream termination. +pub struct DataColumnsByRangeRequestItems { + request: DataColumnsByRangeRequest, + items: Vec>>, +} + +impl DataColumnsByRangeRequestItems { + pub fn new(request: DataColumnsByRangeRequest) -> Self { + Self { + request, + items: vec![], + } + } +} + +impl ActiveRequestItems for DataColumnsByRangeRequestItems { + type Item = Arc>; + + fn add(&mut self, data_column: Self::Item) -> Result { + if data_column.slot() < self.request.start_slot + || data_column.slot() >= self.request.start_slot + self.request.count + { + return Err(LookupVerifyError::UnrequestedSlot(data_column.slot())); + } + if !self.request.columns.contains(&data_column.index) { + return Err(LookupVerifyError::UnrequestedIndex(data_column.index)); + } + if !data_column.verify_inclusion_proof() { + return Err(LookupVerifyError::InvalidInclusionProof); + } + if self.items.iter().any(|existing| { + existing.slot() == data_column.slot() && existing.index == data_column.index + }) { + return Err(LookupVerifyError::DuplicatedData( + data_column.slot(), + data_column.index, + )); + } + + self.items.push(data_column); + + Ok(self.items.len() >= self.request.count as usize * self.request.columns.len()) + } + + fn consume(&mut self) -> Vec { + std::mem::take(&mut self.items) + } +} diff --git 
a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs index 1b8d46ff07..4e02737f08 100644 --- a/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs +++ b/beacon_node/network/src/sync/network_context/requests/data_columns_by_root.rs @@ -57,7 +57,10 @@ impl ActiveRequestItems for DataColumnsByRootRequestItems { return Err(LookupVerifyError::UnrequestedIndex(data_column.index)); } if self.items.iter().any(|d| d.index == data_column.index) { - return Err(LookupVerifyError::DuplicateData); + return Err(LookupVerifyError::DuplicatedData( + data_column.slot(), + data_column.index, + )); } self.items.push(data_column); diff --git a/beacon_node/network/src/sync/peer_sync_info.rs b/beacon_node/network/src/sync/peer_sync_info.rs index c01366f1be..5ea1533d35 100644 --- a/beacon_node/network/src/sync/peer_sync_info.rs +++ b/beacon_node/network/src/sync/peer_sync_info.rs @@ -30,8 +30,8 @@ pub fn remote_sync_type( ) -> PeerSyncType { // auxiliary variables for clarity: Inclusive boundaries of the range in which we consider a peer's // head "near" ours. 
- let near_range_start = local.head_slot - SLOT_IMPORT_TOLERANCE as u64; - let near_range_end = local.head_slot + SLOT_IMPORT_TOLERANCE as u64; + let near_range_start = local.head_slot.saturating_sub(SLOT_IMPORT_TOLERANCE); + let near_range_end = local.head_slot.saturating_add(SLOT_IMPORT_TOLERANCE); match remote.finalized_epoch.cmp(&local.finalized_epoch) { Ordering::Less => { diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 53fb55b14d..912287a8a4 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,4 +1,4 @@ -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::RpcBlock; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::service::api_types::Id; use lighthouse_network::PeerId; @@ -271,42 +271,9 @@ impl BatchInfo { pub fn download_completed( &mut self, blocks: Vec>, - ) -> Result< - usize, /* Received blocks */ - Result<(Slot, Slot, BatchOperationOutcome), WrongState>, - > { + ) -> Result { match self.state.poison() { BatchState::Downloading(peer, _request_id) => { - // verify that blocks are in range - if let Some(last_slot) = blocks.last().map(|b| b.slot()) { - // the batch is non-empty - let first_slot = blocks[0].slot(); - - let failed_range = if first_slot < self.start_slot { - Some((self.start_slot, first_slot)) - } else if self.end_slot < last_slot { - Some((self.end_slot, last_slot)) - } else { - None - }; - - if let Some((expected, received)) = failed_range { - // this is a failed download, register the attempt and check if the batch - // can be tried again - self.failed_download_attempts.push(peer); - self.state = if self.failed_download_attempts.len() - >= B::max_batch_download_attempts() as usize - { - BatchState::Failed - } else { - // drop the blocks - BatchState::AwaitingDownload - }; - - return Err(Ok((expected, received, 
self.outcome()))); - } - } - let received = blocks.len(); self.state = BatchState::AwaitingProcessing(peer, blocks, Instant::now()); Ok(received) @@ -314,10 +281,10 @@ impl BatchInfo { BatchState::Poisoned => unreachable!("Poisoned batch"), other => { self.state = other; - Err(Err(WrongState(format!( + Err(WrongState(format!( "Download completed for batch in wrong state {:?}", self.state - )))) + ))) } } } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 51d9d9da37..f02262e4b5 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -15,7 +15,6 @@ use rand::seq::SliceRandom; use rand::Rng; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; -use std::hash::{Hash, Hasher}; use strum::IntoStaticStr; use types::{Epoch, EthSpec, Hash256, Slot}; @@ -56,7 +55,7 @@ pub enum RemoveChain { pub struct KeepChain; /// A chain identifier -pub type ChainId = u64; +pub type ChainId = Id; pub type BatchId = Epoch; #[derive(Debug, Copy, Clone, IntoStaticStr)] @@ -127,14 +126,9 @@ pub enum ChainSyncingState { } impl SyncingChain { - pub fn id(target_root: &Hash256, target_slot: &Slot) -> u64 { - let mut hasher = std::collections::hash_map::DefaultHasher::new(); - (target_root, target_slot).hash(&mut hasher); - hasher.finish() - } - #[allow(clippy::too_many_arguments)] pub fn new( + id: Id, start_epoch: Epoch, target_head_slot: Slot, target_head_root: Hash256, @@ -145,8 +139,6 @@ impl SyncingChain { let mut peers = FnvHashMap::default(); peers.insert(peer_id, Default::default()); - let id = SyncingChain::::id(&target_head_root, &target_head_slot); - SyncingChain { id, chain_type, @@ -165,6 +157,11 @@ impl SyncingChain { } } + /// Returns true if this chain has the same target + pub fn has_same_target(&self, target_head_slot: Slot, target_head_root: Hash256) -> bool { + self.target_head_slot == target_head_slot 
&& self.target_head_root == target_head_root + } + /// Check if the chain has peers from which to process batches. pub fn available_peers(&self) -> usize { self.peers.len() @@ -268,40 +265,21 @@ impl SyncingChain { } }; - { - // A stream termination has been sent. This batch has ended. Process a completed batch. - // Remove the request from the peer's active batches - self.peers - .get_mut(peer_id) - .map(|active_requests| active_requests.remove(&batch_id)); + // A stream termination has been sent. This batch has ended. Process a completed batch. + // Remove the request from the peer's active batches + self.peers + .get_mut(peer_id) + .map(|active_requests| active_requests.remove(&batch_id)); - match batch.download_completed(blocks) { - Ok(received) => { - let awaiting_batches = batch_id - .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target)) - / EPOCHS_PER_BATCH; - debug!(self.log, "Batch downloaded"; "epoch" => batch_id, "blocks" => received, "batch_state" => self.visualize_batch_state(), "awaiting_batches" => awaiting_batches); + let received = batch.download_completed(blocks)?; + let awaiting_batches = batch_id + .saturating_sub(self.optimistic_start.unwrap_or(self.processing_target)) + / EPOCHS_PER_BATCH; + debug!(self.log, "Batch downloaded"; "epoch" => batch_id, "blocks" => received, "batch_state" => self.visualize_batch_state(), "awaiting_batches" => awaiting_batches); - // pre-emptively request more blocks from peers whilst we process current blocks, - self.request_batches(network)?; - self.process_completed_batches(network) - } - Err(result) => { - let (expected_boundary, received_boundary, outcome) = result?; - warn!(self.log, "Batch received out of range blocks"; "expected_boundary" => expected_boundary, "received_boundary" => received_boundary, - "peer_id" => %peer_id, batch); - - if let BatchOperationOutcome::Failed { blacklist } = outcome { - return Err(RemoveChain::ChainFailed { - blacklist, - failing_batch: batch_id, - }); - } - 
// this batch can't be used, so we need to request it again. - self.retry_batch_download(network, batch_id) - } - } - } + // pre-emptively request more blocks from peers whilst we process current blocks, + self.request_batches(network)?; + self.process_completed_batches(network) } /// Processes the batch with the given id. @@ -1258,7 +1236,7 @@ impl slog::KV for SyncingChain { serializer: &mut dyn slog::Serializer, ) -> slog::Result { use slog::Value; - serializer.emit_u64("id", self.id)?; + serializer.emit_u32("id", self.id)?; Value::serialize(&self.start_epoch, record, "from", serializer)?; Value::serialize( &self.target_head_slot.epoch(T::EthSpec::slots_per_epoch()), diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index c030d0a19e..15bdf85e20 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -9,6 +9,7 @@ use crate::metrics; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; +use lighthouse_network::service::api_types::Id; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; use slog::{crit, debug, error}; @@ -29,9 +30,9 @@ const MIN_FINALIZED_CHAIN_PROCESSED_EPOCHS: u64 = 10; #[derive(Clone)] pub enum RangeSyncState { /// A finalized chain is being synced. - Finalized(u64), + Finalized(Id), /// There are no finalized chains and we are syncing one more head chains. - Head(SmallVec<[u64; PARALLEL_HEAD_CHAINS]>), + Head(SmallVec<[Id; PARALLEL_HEAD_CHAINS]>), /// There are no head or finalized chains and no long range sync is in progress. 
Idle, } @@ -74,7 +75,7 @@ impl ChainCollection { if syncing_id == id { // the finalized chain that was syncing was removed debug_assert!(was_syncing && sync_type == RangeSyncType::Finalized); - let syncing_head_ids: SmallVec<[u64; PARALLEL_HEAD_CHAINS]> = self + let syncing_head_ids: SmallVec<[Id; PARALLEL_HEAD_CHAINS]> = self .head_chains .iter() .filter(|(_id, chain)| chain.is_syncing()) @@ -86,7 +87,7 @@ impl ChainCollection { RangeSyncState::Head(syncing_head_ids) }; } else { - // we removed a head chain, or an stoped finalized chain + // we removed a head chain, or a stopped finalized chain debug_assert!(!was_syncing || sync_type != RangeSyncType::Finalized); } } @@ -355,7 +356,7 @@ impl ChainCollection { .collect::>(); preferred_ids.sort_unstable(); - let mut syncing_chains = SmallVec::<[u64; PARALLEL_HEAD_CHAINS]>::new(); + let mut syncing_chains = SmallVec::<[Id; PARALLEL_HEAD_CHAINS]>::new(); for (_, _, id) in preferred_ids { let chain = self.head_chains.get_mut(&id).expect("known chain"); if syncing_chains.len() < PARALLEL_HEAD_CHAINS { @@ -465,15 +466,17 @@ impl ChainCollection { sync_type: RangeSyncType, network: &mut SyncNetworkContext, ) { - let id = SyncingChain::::id(&target_head_root, &target_head_slot); let collection = if let RangeSyncType::Finalized = sync_type { &mut self.finalized_chains } else { &mut self.head_chains }; - match collection.entry(id) { - Entry::Occupied(mut entry) => { - let chain = entry.get_mut(); + + match collection + .iter_mut() + .find(|(_, chain)| chain.has_same_target(target_head_slot, target_head_root)) + { + Some((&id, chain)) => { debug!(self.log, "Adding peer to known chain"; "peer_id" => %peer, "sync_type" => ?sync_type, &chain); debug_assert_eq!(chain.target_head_root, target_head_root); debug_assert_eq!(chain.target_head_slot, target_head_slot); @@ -483,13 +486,16 @@ impl ChainCollection { } else { error!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); } - let chain = 
entry.remove(); - self.on_chain_removed(&id, chain.is_syncing(), sync_type); + let is_syncing = chain.is_syncing(); + collection.remove(&id); + self.on_chain_removed(&id, is_syncing, sync_type); } } - Entry::Vacant(entry) => { + None => { let peer_rpr = peer.to_string(); + let id = network.next_id(); let new_chain = SyncingChain::new( + id, start_epoch, target_head_slot, target_head_root, @@ -497,9 +503,8 @@ impl ChainCollection { sync_type.into(), &self.log, ); - debug_assert_eq!(new_chain.get_id(), id); debug!(self.log, "New chain added to sync"; "peer_id" => peer_rpr, "sync_type" => ?sync_type, &new_chain); - entry.insert(new_chain); + collection.insert(id, new_chain); metrics::inc_counter_vec(&metrics::SYNCING_CHAINS_ADDED, &[sync_type.as_str()]); self.update_metrics(); } diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index f623aa2c12..9ab581950c 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -39,11 +39,12 @@ use lighthouse_network::{ use slog::info; use slot_clock::{SlotClock, TestingSlotClock}; use tokio::sync::mpsc; +use types::ForkContext; use types::{ data_column_sidecar::ColumnIndex, test_utils::{SeedableRng, TestRandom, XorShiftRng}, - BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, Epoch, EthSpec, ForkName, - Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot, + BeaconState, BeaconStateBase, BlobSidecar, DataColumnSidecar, EthSpec, ForkName, Hash256, + MinimalEthSpec as E, SignedBeaconBlock, Slot, }; const D: Duration = Duration::new(0, 0); @@ -53,12 +54,8 @@ const SAMPLING_REQUIRED_SUCCESSES: usize = 2; type DCByRootIds = Vec; type DCByRootId = (SyncRequestId, Vec); -struct TestRigConfig { - peer_das_enabled: bool, -} - impl TestRig { - fn test_setup_with_config(config: Option) -> Self { + pub fn test_setup() -> Self { let logger_type = if cfg!(feature = "test_logger") { LoggerType::Test } else if cfg!(feature 
= "ci_logger") { @@ -69,13 +66,7 @@ impl TestRig { let log = build_log(slog::Level::Trace, logger_type); // Use `fork_from_env` logic to set correct fork epochs - let mut spec = test_spec::(); - - if let Some(config) = config { - if config.peer_das_enabled { - spec.eip7594_fork_epoch = Some(Epoch::new(0)); - } - } + let spec = test_spec::(); // Initialise a new beacon chain let harness = BeaconChainHarness::>::builder(E) @@ -92,6 +83,11 @@ impl TestRig { .build(); let chain = harness.chain.clone(); + let fork_context = Arc::new(ForkContext::new::( + Slot::new(0), + chain.genesis_validators_root, + &chain.spec, + )); let (network_tx, network_rx) = mpsc::unbounded_channel(); let (sync_tx, sync_rx) = mpsc::unbounded_channel::>(); @@ -139,6 +135,7 @@ impl TestRig { SamplingConfig::Custom { required_successes: vec![SAMPLING_REQUIRED_SUCCESSES], }, + fork_context, log.clone(), ), harness, @@ -148,24 +145,18 @@ impl TestRig { } } - pub fn test_setup() -> Self { - Self::test_setup_with_config(None) - } - - fn test_setup_after_deneb() -> Option { + fn test_setup_after_deneb_before_fulu() -> Option { let r = Self::test_setup(); - if r.after_deneb() { + if r.after_deneb() && !r.fork_name.fulu_enabled() { Some(r) } else { None } } - fn test_setup_after_peerdas() -> Option { - let r = Self::test_setup_with_config(Some(TestRigConfig { - peer_das_enabled: true, - })); - if r.after_deneb() { + fn test_setup_after_fulu() -> Option { + let r = Self::test_setup(); + if r.fork_name.fulu_enabled() { Some(r) } else { None @@ -180,6 +171,10 @@ impl TestRig { self.fork_name.deneb_enabled() } + pub fn after_fulu(&self) -> bool { + self.fork_name.fulu_enabled() + } + fn trigger_unknown_parent_block(&mut self, peer_id: PeerId, block: Arc>) { let block_root = block.canonical_root(); self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root)) @@ -380,7 +375,7 @@ impl TestRig { .__add_connected_peer_testing_only(false, &self.harness.spec) } - fn 
new_connected_supernode_peer(&mut self) -> PeerId { + pub fn new_connected_supernode_peer(&mut self) -> PeerId { self.network_globals .peers .write() @@ -1677,7 +1672,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { rig.assert_not_failed_chain(block_root); // send the right parent but fail processing rig.parent_lookup_block_response(id, peer_id, Some(parent.clone().into())); - rig.parent_block_processed(block_root, BlockError::InvalidSignature.into()); + rig.parent_block_processed(block_root, BlockError::BlockSlotLimitReached.into()); rig.parent_lookup_block_response(id, peer_id, None); rig.expect_penalty(peer_id, "lookup_block_processing_failure"); } @@ -1938,7 +1933,7 @@ fn test_same_chain_race_condition() { #[test] fn block_in_da_checker_skips_download() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); @@ -1956,7 +1951,7 @@ fn block_in_da_checker_skips_download() { #[test] fn block_in_processing_cache_becomes_invalid() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); @@ -1982,7 +1977,7 @@ fn block_in_processing_cache_becomes_invalid() { #[test] fn block_in_processing_cache_becomes_valid_imported() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(1)); @@ -2007,7 +2002,7 @@ fn block_in_processing_cache_becomes_valid_imported() { #[ignore] #[test] fn blobs_in_da_checker_skip_download() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = 
r.rand_block_and_blobs(NumBlobs::Number(1)); @@ -2026,7 +2021,7 @@ fn blobs_in_da_checker_skip_download() { #[test] fn sampling_happy_path() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; r.new_connected_peers_for_peerdas(); @@ -2043,7 +2038,7 @@ fn sampling_happy_path() { #[test] fn sampling_with_retries() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; r.new_connected_peers_for_peerdas(); @@ -2065,7 +2060,7 @@ fn sampling_with_retries() { #[test] fn sampling_avoid_retrying_same_peer() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; let peer_id_1 = r.new_connected_supernode_peer(); @@ -2086,7 +2081,7 @@ fn sampling_avoid_retrying_same_peer() { #[test] fn sampling_batch_requests() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; let _supernode = r.new_connected_supernode_peer(); @@ -2112,7 +2107,7 @@ fn sampling_batch_requests() { #[test] fn sampling_batch_requests_not_enough_responses_returned() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; let _supernode = r.new_connected_supernode_peer(); @@ -2157,7 +2152,7 @@ fn sampling_batch_requests_not_enough_responses_returned() { #[test] fn custody_lookup_happy_path() { - let Some(mut r) = TestRig::test_setup_after_peerdas() else { + let Some(mut r) = TestRig::test_setup_after_fulu() else { return; }; let spec = E::default_spec(); @@ -2231,7 +2226,7 @@ mod deneb_only { impl DenebTester { fn new(request_trigger: RequestTrigger) -> Option { - let Some(mut rig) = TestRig::test_setup_after_deneb() else { + let Some(mut rig) = TestRig::test_setup_after_deneb_before_fulu() else { return None; 
}; let (block, blobs) = rig.rand_block_and_blobs(NumBlobs::Random); @@ -2575,7 +2570,7 @@ mod deneb_only { fn invalid_parent_processed(mut self) -> Self { self.rig.parent_block_processed( self.block_root, - BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + BlockProcessingResult::Err(BlockError::BlockSlotLimitReached), ); assert_eq!(self.rig.active_parent_lookups_count(), 1); self @@ -2584,7 +2579,7 @@ mod deneb_only { fn invalid_block_processed(mut self) -> Self { self.rig.single_block_component_processed( self.block_req_id.expect("block request id").lookup_id, - BlockProcessingResult::Err(BlockError::ProposalSignatureInvalid), + BlockProcessingResult::Err(BlockError::BlockSlotLimitReached), ); self.rig.assert_single_lookups_count(1); self @@ -2956,7 +2951,7 @@ mod deneb_only { #[ignore] #[test] fn no_peer_penalty_when_rpc_response_already_known_from_gossip() { - let Some(mut r) = TestRig::test_setup_after_deneb() else { + let Some(mut r) = TestRig::test_setup_after_deneb_before_fulu() else { return; }; let (block, blobs) = r.rand_block_and_blobs(NumBlobs::Number(2)); diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 05d5e4a414..f78b44308d 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -3,10 +3,18 @@ use crate::status::ToStatusMessage; use crate::sync::manager::SLOT_IMPORT_TOLERANCE; use crate::sync::range_sync::RangeSyncType; use crate::sync::SyncMessage; +use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; use beacon_chain::{block_verification_types::RpcBlock, EngineState, NotifyExecutionLayer}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, DataColumnsByRangeRequest, OldBlocksByRangeRequest, + OldBlocksByRangeRequestV2, +}; use lighthouse_network::rpc::{RequestType, StatusMessage}; -use 
lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; +use lighthouse_network::service::api_types::{ + AppRequestId, BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, + SyncRequestId, +}; use lighthouse_network::{PeerId, SyncInfo}; use std::time::Duration; use types::{ @@ -16,6 +24,47 @@ use types::{ const D: Duration = Duration::new(0, 0); +pub(crate) enum DataSidecars { + Blobs(BlobSidecarList), + DataColumns(Vec>), +} + +enum ByRangeDataRequestIds { + PreDeneb, + PrePeerDAS(BlobsByRangeRequestId, PeerId), + PostPeerDAS(Vec<(DataColumnsByRangeRequestId, PeerId)>), +} + +/// Sync tests are usually written in the form: +/// - Do some action +/// - Expect a request to be sent +/// - Complete the above request +/// +/// To make writing tests succinct, the machinery in this testing rig automatically identifies +/// _which_ request to complete. Picking the right request is critical for tests to pass, so this +/// filter allows better expressivity on the criteria to identify the right request. +#[derive(Default)] +struct RequestFilter { + peer: Option, + epoch: Option, +} + +impl RequestFilter { + fn peer(mut self, peer: PeerId) -> Self { + self.peer = Some(peer); + self + } + + fn epoch(mut self, epoch: u64) -> Self { + self.epoch = Some(epoch); + self + } +} + +fn filter() -> RequestFilter { + RequestFilter::default() +} + impl TestRig { /// Produce a head peer with an advanced head fn add_head_peer(&mut self) -> PeerId { @@ -67,7 +116,9 @@ impl TestRig { fn add_peer(&mut self, remote_info: SyncInfo) -> PeerId { // Create valid peer known to network globals - let peer_id = self.new_connected_peer(); + // TODO(fulu): Using supernode peers to ensure we have peers across all column + // subnets for syncing. Should add tests connecting to full node peers. 
+ let peer_id = self.new_connected_supernode_peer(); // Send peer to sync self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info.clone())); peer_id @@ -86,11 +137,13 @@ impl TestRig { } #[track_caller] - fn expect_chain_segment(&mut self) { - self.pop_received_processor_event(|ev| { - (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) - }) - .unwrap_or_else(|e| panic!("Expect ChainSegment work event: {e:?}")); + fn expect_chain_segments(&mut self, count: usize) { + for i in 0..count { + self.pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expect ChainSegment work event count {i}: {e:?}")); + } } fn update_execution_engine_state(&mut self, state: EngineState) { @@ -98,68 +151,124 @@ impl TestRig { self.sync_manager.update_execution_engine_state(state); } - fn find_blocks_by_range_request(&mut self, target_peer_id: &PeerId) -> (Id, Option) { + fn find_blocks_by_range_request( + &mut self, + request_filter: RequestFilter, + ) -> ((BlocksByRangeRequestId, PeerId), ByRangeDataRequestIds) { + let filter_f = |peer: PeerId, start_slot: u64| { + if let Some(expected_epoch) = request_filter.epoch { + let epoch = Slot::new(start_slot).epoch(E::slots_per_epoch()).as_u64(); + if epoch != expected_epoch { + return false; + } + } + if let Some(expected_peer) = request_filter.peer { + if peer != expected_peer { + return false; + } + } + true + }; + let block_req_id = self .pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id, - request: RequestType::BlocksByRange(_), - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - } if peer_id == target_peer_id => Some(*id), + request: + RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( + OldBlocksByRangeRequestV2 { start_slot, .. 
}, + )), + request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), + } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) .expect("Should have a blocks by range request"); - let blob_req_id = if self.after_deneb() { - Some( - self.pop_received_network_event(|ev| match ev { + let by_range_data_requests = if self.after_fulu() { + let mut data_columns_requests = vec![]; + while let Ok(data_columns_request) = self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: + RequestType::DataColumnsByRange(DataColumnsByRangeRequest { + start_slot, .. + }), + request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), + } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), + _ => None, + }) { + data_columns_requests.push(data_columns_request); + } + if data_columns_requests.is_empty() { + panic!("Found zero DataColumnsByRange requests"); + } + ByRangeDataRequestIds::PostPeerDAS(data_columns_requests) + } else if self.after_deneb() { + let (id, peer) = self + .pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id, - request: RequestType::BlobsByRange(_), - request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), - } if peer_id == target_peer_id => Some(*id), + request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot, .. 
}), + request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), + } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) - .expect("Should have a blobs by range request"), - ) + .expect("Should have a blobs by range request"); + ByRangeDataRequestIds::PrePeerDAS(id, peer) } else { - None + ByRangeDataRequestIds::PreDeneb }; - (block_req_id, blob_req_id) + (block_req_id, by_range_data_requests) } - fn find_and_complete_blocks_by_range_request(&mut self, target_peer_id: PeerId) { - let (blocks_req_id, blobs_req_id) = self.find_blocks_by_range_request(&target_peer_id); + fn find_and_complete_blocks_by_range_request(&mut self, request_filter: RequestFilter) { + let ((blocks_req_id, block_peer), by_range_data_request_ids) = + self.find_blocks_by_range_request(request_filter); // Complete the request with a single stream termination self.log(&format!( - "Completing BlocksByRange request {blocks_req_id} with empty stream" + "Completing BlocksByRange request {blocks_req_id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcBlock { - request_id: SyncRequestId::RangeBlockAndBlobs { id: blocks_req_id }, - peer_id: target_peer_id, + request_id: SyncRequestId::BlocksByRange(blocks_req_id), + peer_id: block_peer, beacon_block: None, seen_timestamp: D, }); - if let Some(blobs_req_id) = blobs_req_id { - // Complete the request with a single stream termination - self.log(&format!( - "Completing BlobsByRange request {blobs_req_id} with empty stream" - )); - self.send_sync_message(SyncMessage::RpcBlob { - request_id: SyncRequestId::RangeBlockAndBlobs { id: blobs_req_id }, - peer_id: target_peer_id, - blob_sidecar: None, - seen_timestamp: D, - }); + match by_range_data_request_ids { + ByRangeDataRequestIds::PreDeneb => {} + ByRangeDataRequestIds::PrePeerDAS(id, peer_id) => { + // Complete the request with a single stream termination + self.log(&format!( + "Completing BlobsByRange request {id:?} with empty stream" + )); + 
self.send_sync_message(SyncMessage::RpcBlob { + request_id: SyncRequestId::BlobsByRange(id), + peer_id, + blob_sidecar: None, + seen_timestamp: D, + }); + } + ByRangeDataRequestIds::PostPeerDAS(data_column_req_ids) => { + // Complete the request with a single stream termination + for (id, peer_id) in data_column_req_ids { + self.log(&format!( + "Completing DataColumnsByRange request {id:?} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcDataColumn { + request_id: SyncRequestId::DataColumnsByRange(id), + peer_id, + data_column: None, + seen_timestamp: D, + }); + } + } } } - async fn create_canonical_block( - &mut self, - ) -> (SignedBeaconBlock, Option>) { + async fn create_canonical_block(&mut self) -> (SignedBeaconBlock, Option>) { self.harness.advance_slot(); let block_root = self @@ -170,20 +279,38 @@ impl TestRig { AttestationStrategy::AllValidators, ) .await; - // TODO(das): this does not handle data columns yet + let store = &self.harness.chain.store; let block = store.get_full_block(&block_root).unwrap().unwrap(); - let blobs = if block.fork_name_unchecked().deneb_enabled() { - store.get_blobs(&block_root).unwrap().blobs() + let fork = block.fork_name_unchecked(); + + let data_sidecars = if fork.fulu_enabled() { + store + .get_data_columns(&block_root) + .unwrap() + .map(|columns| { + columns + .into_iter() + .map(CustodyDataColumn::from_asserted_custody) + .collect() + }) + .map(DataSidecars::DataColumns) + } else if fork.deneb_enabled() { + store + .get_blobs(&block_root) + .unwrap() + .blobs() + .map(DataSidecars::Blobs) } else { None }; - (block, blobs) + + (block, data_sidecars) } async fn remember_block( &mut self, - (block, blob_sidecars): (SignedBeaconBlock, Option>), + (block, data_sidecars): (SignedBeaconBlock, Option>), ) { // This code is kind of duplicated from Harness::process_block, but takes sidecars directly. 
let block_root = block.canonical_root(); @@ -193,7 +320,7 @@ impl TestRig { .chain .process_block( block_root, - RpcBlock::new(Some(block_root), block.into(), blob_sidecars).unwrap(), + build_rpc_block(block.into(), &data_sidecars, &self.spec), NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), @@ -206,6 +333,22 @@ impl TestRig { } } +fn build_rpc_block( + block: Arc>, + data_sidecars: &Option>, + spec: &ChainSpec, +) -> RpcBlock { + match data_sidecars { + Some(DataSidecars::Blobs(blobs)) => { + RpcBlock::new(None, block, Some(blobs.clone())).unwrap() + } + Some(DataSidecars::DataColumns(columns)) => { + RpcBlock::new_with_custody_columns(None, block, columns.clone(), spec).unwrap() + } + None => RpcBlock::new_without_blobs(None, block), + } +} + #[test] fn head_chain_removed_while_finalized_syncing() { // NOTE: this is a regression test. @@ -217,14 +360,14 @@ fn head_chain_removed_while_finalized_syncing() { rig.assert_state(RangeSyncType::Head); // Sync should have requested a batch, grab the request. - let _ = rig.find_blocks_by_range_request(&head_peer); + let _ = rig.find_blocks_by_range_request(filter().peer(head_peer)); // Now get a peer with an advanced finalized epoch. let finalized_peer = rig.add_finalized_peer(); rig.assert_state(RangeSyncType::Finalized); // Sync should have requested a batch, grab the request - let _ = rig.find_blocks_by_range_request(&finalized_peer); + let _ = rig.find_blocks_by_range_request(filter().peer(finalized_peer)); // Fail the head chain by disconnecting the peer. rig.peer_disconnected(head_peer); @@ -251,14 +394,14 @@ async fn state_update_while_purging() { rig.assert_state(RangeSyncType::Head); // Sync should have requested a batch, grab the request. - let _ = rig.find_blocks_by_range_request(&head_peer); + let _ = rig.find_blocks_by_range_request(filter().peer(head_peer)); // Now get a peer with an advanced finalized epoch. 
let finalized_peer = rig.add_finalized_peer_with_root(finalized_peer_root); rig.assert_state(RangeSyncType::Finalized); // Sync should have requested a batch, grab the request - let _ = rig.find_blocks_by_range_request(&finalized_peer); + let _ = rig.find_blocks_by_range_request(filter().peer(finalized_peer)); // Now the chain knows both chains target roots. rig.remember_block(head_peer_block).await; @@ -277,15 +420,18 @@ fn pause_and_resume_on_ee_offline() { // make the ee offline rig.update_execution_engine_state(EngineState::Offline); // send the response to the request - rig.find_and_complete_blocks_by_range_request(peer1); + rig.find_and_complete_blocks_by_range_request(filter().peer(peer1).epoch(0)); // the beacon processor shouldn't have received any work rig.expect_empty_processor(); // while the ee is offline, more peers might arrive. Add a new finalized peer. - let peer2 = rig.add_finalized_peer(); + let _peer2 = rig.add_finalized_peer(); // send the response to the request - rig.find_and_complete_blocks_by_range_request(peer2); + // Don't filter requests and the columns requests may be sent to peer1 or peer2 + // We need to filter by epoch, because the previous batch eagerly sent requests for the next + // epoch for the other batch. So we can either filter by epoch of by sync type. + rig.find_and_complete_blocks_by_range_request(filter().epoch(0)); // the beacon processor shouldn't have received any work rig.expect_empty_processor(); // make the beacon processor available again. @@ -293,6 +439,6 @@ fn pause_and_resume_on_ee_offline() { // now resume range, we should have two processing requests in the beacon processor. 
rig.update_execution_engine_state(EngineState::Online); - rig.expect_chain_segment(); - rig.expect_chain_segment(); + // The head chain and finalized chain (2) should be in the processing queue + rig.expect_chain_segments(2); } diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 083c1170f0..49ef5c279c 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -214,7 +214,7 @@ impl CompactIndexedAttestationElectra { .is_zero() } - /// Returns `true` if aggregated, otherwise `false`. + /// Returns `true` if aggregated, otherwise `false`. pub fn aggregate_same_committee(&mut self, other: &Self) -> bool { if self.committee_bits != other.committee_bits { return false; diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index cecfcee868..4c2daecdd3 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -18,15 +18,6 @@ pub fn cli_app() -> Command { /* * Configuration directory locations. */ - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - ) .arg( Arg::new("network-dir") .long("network-dir") @@ -156,16 +147,16 @@ pub fn cli_app() -> Command { .long("listen-address") .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. To listen \ - over IpV4 and IpV6 set this flag twice with the different values.\n\ + over IPv4 and IPv6 set this flag twice with the different values.\n\ Examples:\n\ - --listen-address '0.0.0.0' will listen over IPv4.\n\ - --listen-address '::' will listen over IPv6.\n\ - --listen-address '0.0.0.0' --listen-address '::' will listen over both \ IPv4 and IPv6. The order of the given addresses is not relevant. 
However, \ - multiple IPv4, or multiple IPv6 addresses will not be accepted.") + multiple IPv4, or multiple IPv6 addresses will not be accepted. \ + If omitted, Lighthouse will listen on all interfaces, for both IPv4 and IPv6.") .action(ArgAction::Append) .num_args(0..=2) - .default_value("0.0.0.0") .display_order(0) ) .arg( @@ -185,8 +176,7 @@ pub fn cli_app() -> Command { .long("port6") .value_name("PORT") .help("The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 and \ - IPv6. Defaults to 9090 when required. The Quic UDP port will be set to this value + 1.") - .default_value("9090") + IPv6. Defaults to --port. The Quic UDP port will be set to this value + 1.") .action(ArgAction::Set) .display_order(0) ) @@ -1504,9 +1494,18 @@ pub fn cli_app() -> Command { .arg( Arg::new("light-client-server") .long("light-client-server") - .help("Act as a full node supporting light clients on the p2p network \ - [experimental]") + .help("DEPRECATED") .action(ArgAction::SetTrue) + + .help_heading(FLAG_HEADER) + .display_order(0) + ) + .arg( + Arg::new("disable-light-client-server") + .long("disable-light-client-server") + .help("Disables light client support on the p2p network") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) .display_order(0) ) @@ -1591,5 +1590,14 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) + .arg( + Arg::new("beacon-node-backend") + .long("beacon-node-backend") + .value_name("DATABASE") + .value_parser(store::config::DatabaseBackend::VARIANTS.to_vec()) + .help("Set the database backend to be used by the beacon node.") + .action(ArgAction::Set) + .display_order(0) + ) .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true)) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 8d8a44a6fd..24d569bea2 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -176,11 +176,19 @@ pub fn get_config( parse_required(cli_args, 
"http-duplicate-block-status")?; client_config.http_api.enable_light_client_server = - cli_args.get_flag("light-client-server"); + !cli_args.get_flag("disable-light-client-server"); } if cli_args.get_flag("light-client-server") { - client_config.chain.enable_light_client_server = true; + warn!( + log, + "The --light-client-server flag is deprecated. The light client server is enabled \ + by default" + ); + } + + if cli_args.get_flag("disable-light-client-server") { + client_config.chain.enable_light_client_server = false; } if let Some(cache_size) = clap_utils::parse_optional(cli_args, "shuffling-cache-size")? { @@ -432,6 +440,10 @@ pub fn get_config( warn!(log, "The slots-per-restore-point flag is deprecated"); } + if let Some(backend) = clap_utils::parse_optional(cli_args, "beacon-node-backend")? { + client_config.store.backend = backend; + } + if let Some(hierarchy_config) = clap_utils::parse_optional(cli_args, "hierarchy-exponents")? { client_config.store.hierarchy_config = hierarchy_config; } @@ -893,12 +905,13 @@ pub fn parse_listening_addresses( ) -> Result { let listen_addresses_str = cli_args .get_many::("listen-address") - .expect("--listen_addresses has a default value"); + .unwrap_or_default(); let use_zero_ports = parse_flag(cli_args, "zero-ports"); // parse the possible ips let mut maybe_ipv4 = None; let mut maybe_ipv6 = None; + for addr_str in listen_addresses_str { let addr = addr_str.parse::().map_err(|parse_error| { format!("Failed to parse listen-address ({addr_str}) as an Ip address: {parse_error}") @@ -908,8 +921,8 @@ pub fn parse_listening_addresses( IpAddr::V4(v4_addr) => match &maybe_ipv4 { Some(first_ipv4_addr) => { return Err(format!( - "When setting the --listen-address option twice, use an IpV4 address and an Ipv6 address. \ - Got two IpV4 addresses {first_ipv4_addr} and {v4_addr}" + "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. 
\ + Got two IPv4 addresses {first_ipv4_addr} and {v4_addr}" )); } None => maybe_ipv4 = Some(v4_addr), @@ -917,8 +930,8 @@ pub fn parse_listening_addresses( IpAddr::V6(v6_addr) => match &maybe_ipv6 { Some(first_ipv6_addr) => { return Err(format!( - "When setting the --listen-address option twice, use an IpV4 address and an Ipv6 address. \ - Got two IpV6 addresses {first_ipv6_addr} and {v6_addr}" + "When setting the --listen-address option twice, use an IPv4 address and an IPv6 address. \ + Got two IPv6 addresses {first_ipv6_addr} and {v6_addr}" )); } None => maybe_ipv6 = Some(v6_addr), @@ -932,12 +945,11 @@ pub fn parse_listening_addresses( .expect("--port has a default value") .parse::() .map_err(|parse_error| format!("Failed to parse --port as an integer: {parse_error}"))?; - let port6 = cli_args + let maybe_port6 = cli_args .get_one::("port6") .map(|s| str::parse::(s)) .transpose() - .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))? - .unwrap_or(9090); + .map_err(|parse_error| format!("Failed to parse --port6 as an integer: {parse_error}"))?; // parse the possible discovery ports. let maybe_disc_port = cli_args @@ -973,11 +985,22 @@ pub fn parse_listening_addresses( format!("Failed to parse --quic6-port as an integer: {parse_error}") })?; + // Here we specify the default listening addresses for Lighthouse. + // By default, we listen on 0.0.0.0. + // + // IF the host supports a globally routable IPv6 address, we also listen on ::. 
+ if matches!((maybe_ipv4, maybe_ipv6), (None, None)) { + maybe_ipv4 = Some(Ipv4Addr::UNSPECIFIED); + + if NetworkConfig::is_ipv6_supported() { + maybe_ipv6 = Some(Ipv6Addr::UNSPECIFIED); + } + } + // Now put everything together let listening_addresses = match (maybe_ipv4, maybe_ipv6) { (None, None) => { - // This should never happen unless clap is broken - return Err("No listening addresses provided".into()); + unreachable!("This path is handled above this match statement"); } (None, Some(ipv6)) => { // A single ipv6 address was provided. Set the ports @@ -985,6 +1008,10 @@ pub fn parse_listening_addresses( warn!(log, "When listening only over IPv6, use the --port flag. The value of --port6 will be ignored."); } + // If we are only listening on ipv6 and the user has specified --port6, lets just use + // that. + let port = maybe_port6.unwrap_or(port); + // use zero ports if required. If not, use the given port. let tcp_port = use_zero_ports .then(unused_port::unused_tcp6_port) @@ -1051,6 +1078,9 @@ pub fn parse_listening_addresses( }) } (Some(ipv4), Some(ipv6)) => { + // If --port6 is not set, we use --port + let port6 = maybe_port6.unwrap_or(port); + let ipv4_tcp_port = use_zero_ports .then(unused_port::unused_tcp4_port) .transpose()? @@ -1070,7 +1100,7 @@ pub fn parse_listening_addresses( ipv4_tcp_port + 1 }); - // Defaults to 9090 when required + // Defaults to 9000 when required let ipv6_tcp_port = use_zero_ports .then(unused_port::unused_tcp6_port) .transpose()? @@ -1409,7 +1439,7 @@ pub fn set_network_config( } // Light client server config. - config.enable_light_client_server = parse_flag(cli_args, "light-client-server"); + config.enable_light_client_server = !parse_flag(cli_args, "disable-light-client-server"); // The self limiter is enabled by default. If the `self-limiter-protocols` flag is not provided, // the default params will be used. 
diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 0c4cbf0f57..e3802c837c 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -2,7 +2,6 @@ mod cli; mod config; pub use beacon_chain; -use beacon_chain::store::LevelDB; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, }; @@ -16,11 +15,19 @@ use slasher::{DatabaseBackendOverride, Slasher}; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; use std::sync::Arc; +use store::database::interface::BeaconNodeBackend; use types::{ChainSpec, Epoch, EthSpec, ForkName}; /// A type-alias to the tighten the definition of a production-intended `Client`. -pub type ProductionClient = - Client, E, LevelDB, LevelDB>>; +pub type ProductionClient = Client< + Witness< + SystemTimeSlotClock, + CachingEth1Backend, + E, + BeaconNodeBackend, + BeaconNodeBackend, + >, +>; /// The beacon node `Client` that will be used in production. /// diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 21d0cf8dec..d2f3a5c562 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -4,6 +4,11 @@ version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } +[features] +default = ["leveldb"] +leveldb = ["dep:leveldb"] +redb = ["dep:redb"] + [dev-dependencies] beacon_chain = { workspace = true } criterion = { workspace = true } @@ -17,11 +22,12 @@ directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } itertools = { workspace = true } -leveldb = { version = "0.8" } +leveldb = { version = "0.8.6", optional = true } logging = { workspace = true } lru = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } +redb = { version = "2.1.3", optional = true } safe_arith = { workspace = true } serde = { workspace = true } slog = { workspace = true } diff --git a/beacon_node/store/src/chunked_vector.rs 
b/beacon_node/store/src/chunked_vector.rs index 83b8da2a18..90e8c17310 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -680,7 +680,7 @@ where key: &[u8], ) -> Result, Error> { store - .get_bytes(column.into(), key)? + .get_bytes(column, key)? .map(|bytes| Self::decode(&bytes)) .transpose() } @@ -691,8 +691,11 @@ where key: &[u8], ops: &mut Vec, ) -> Result<(), Error> { - let db_key = get_key_for_col(column.into(), key); - ops.push(KeyValueStoreOp::PutKeyValue(db_key, self.encode()?)); + ops.push(KeyValueStoreOp::PutKeyValue( + column, + key.to_vec(), + self.encode()?, + )); Ok(()) } diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 4f67530570..64765fd66a 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,16 +1,23 @@ use crate::hdiff::HierarchyConfig; +use crate::superstruct; use crate::{AnchorInfo, DBColumn, Error, Split, StoreItem}; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::io::Write; use std::num::NonZeroUsize; -use superstruct::superstruct; +use strum::{Display, EnumString, EnumVariantNames}; use types::non_zero_usize::new_non_zero_usize; use types::EthSpec; use zstd::Encoder; -// Only used in tests. Mainnet sets a higher default on the CLI. 
+#[cfg(all(feature = "redb", not(feature = "leveldb")))] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::Redb; +#[cfg(feature = "leveldb")] +pub const DEFAULT_BACKEND: DatabaseBackend = DatabaseBackend::LevelDb; + +pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; +pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_EPOCHS_PER_STATE_DIFF: u64 = 8; pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(64); pub const DEFAULT_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128); @@ -40,6 +47,8 @@ pub struct StoreConfig { pub compact_on_prune: bool, /// Whether to prune payloads on initialization and finalization. pub prune_payloads: bool, + /// Database backend to use. + pub backend: DatabaseBackend, /// State diff hierarchy. pub hierarchy_config: HierarchyConfig, /// Whether to prune blobs older than the blob data availability boundary. @@ -104,6 +113,7 @@ impl Default for StoreConfig { compact_on_init: false, compact_on_prune: true, prune_payloads: true, + backend: DEFAULT_BACKEND, hierarchy_config: HierarchyConfig::default(), prune_blobs: true, epochs_per_blob_prune: DEFAULT_EPOCHS_PER_BLOB_PRUNE, @@ -340,3 +350,14 @@ mod test { assert_eq!(config_out, config); } } + +#[derive( + Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Display, EnumString, EnumVariantNames, +)] +#[strum(serialize_all = "lowercase")] +pub enum DatabaseBackend { + #[cfg(feature = "leveldb")] + LevelDb, + #[cfg(feature = "redb")] + Redb, +} diff --git a/beacon_node/store/src/database.rs b/beacon_node/store/src/database.rs new file mode 100644 index 0000000000..2232f73c5c --- /dev/null +++ b/beacon_node/store/src/database.rs @@ -0,0 +1,5 @@ +pub mod interface; +#[cfg(feature = "leveldb")] +pub mod leveldb_impl; +#[cfg(feature = "redb")] +pub mod redb_impl; diff --git a/beacon_node/store/src/database/interface.rs b/beacon_node/store/src/database/interface.rs new file mode 100644 index 0000000000..b213433241 --- /dev/null 
+++ b/beacon_node/store/src/database/interface.rs @@ -0,0 +1,220 @@ +#[cfg(feature = "leveldb")] +use crate::database::leveldb_impl; +#[cfg(feature = "redb")] +use crate::database::redb_impl; +use crate::{config::DatabaseBackend, KeyValueStoreOp, StoreConfig}; +use crate::{metrics, ColumnIter, ColumnKeyIter, DBColumn, Error, ItemStore, Key, KeyValueStore}; +use std::collections::HashSet; +use std::path::Path; +use types::EthSpec; + +pub enum BeaconNodeBackend { + #[cfg(feature = "leveldb")] + LevelDb(leveldb_impl::LevelDB), + #[cfg(feature = "redb")] + Redb(redb_impl::Redb), +} + +impl ItemStore for BeaconNodeBackend {} + +impl KeyValueStore for BeaconNodeBackend { + fn get_bytes(&self, column: DBColumn, key: &[u8]) -> Result>, Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::get_bytes(txn, column, key), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::get_bytes(txn, column, key), + } + } + + fn put_bytes(&self, column: DBColumn, key: &[u8], value: &[u8]) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::put_bytes_with_options( + txn, + column, + key, + value, + txn.write_options(), + ), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::put_bytes_with_options( + txn, + column, + key, + value, + txn.write_options(), + ), + } + } + + fn put_bytes_sync(&self, column: DBColumn, key: &[u8], value: &[u8]) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::put_bytes_with_options( + txn, + column, + key, + value, + txn.write_options_sync(), + ), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::put_bytes_with_options( + txn, + column, + key, + value, + txn.write_options_sync(), + ), + } + } + + fn sync(&self) -> Result<(), Error> { + match self { + #[cfg(feature = 
"leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::sync(txn), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::sync(txn), + } + } + + fn key_exists(&self, column: DBColumn, key: &[u8]) -> Result { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::key_exists(txn, column, key), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::key_exists(txn, column, key), + } + } + + fn key_delete(&self, column: DBColumn, key: &[u8]) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::key_delete(txn, column, key), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::key_delete(txn, column, key), + } + } + + fn do_atomically(&self, batch: Vec) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::do_atomically(txn, batch), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::do_atomically(txn, batch), + } + } + + fn begin_rw_transaction(&self) -> parking_lot::MutexGuard<()> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::begin_rw_transaction(txn), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::begin_rw_transaction(txn), + } + } + + fn compact(&self) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::compact(txn), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::compact(txn), + } + } + + fn iter_column_keys_from(&self, _column: DBColumn, from: &[u8]) -> ColumnKeyIter { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => { + leveldb_impl::LevelDB::iter_column_keys_from(txn, _column, from) + } + #[cfg(feature = "redb")] + 
BeaconNodeBackend::Redb(txn) => { + redb_impl::Redb::iter_column_keys_from(txn, _column, from) + } + } + } + + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::iter_column_keys(txn, column), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::iter_column_keys(txn, column), + } + } + + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => { + leveldb_impl::LevelDB::iter_column_from(txn, column, from) + } + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::iter_column_from(txn, column, from), + } + } + + fn compact_column(&self, _column: DBColumn) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::compact_column(txn, _column), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::compact(txn), + } + } + + fn delete_batch(&self, col: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::delete_batch(txn, col, ops), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::delete_batch(txn, col, ops), + } + } + + fn delete_if( + &self, + column: DBColumn, + f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => leveldb_impl::LevelDB::delete_if(txn, column, f), + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::delete_if(txn, column, f), + } + } +} + +impl BeaconNodeBackend { + pub fn open(config: &StoreConfig, path: &Path) -> Result { + metrics::inc_counter_vec(&metrics::DISK_DB_TYPE, &[&config.backend.to_string()]); + match config.backend { + #[cfg(feature = 
"leveldb")] + DatabaseBackend::LevelDb => { + leveldb_impl::LevelDB::open(path).map(BeaconNodeBackend::LevelDb) + } + #[cfg(feature = "redb")] + DatabaseBackend::Redb => redb_impl::Redb::open(path).map(BeaconNodeBackend::Redb), + } + } +} + +pub struct WriteOptions { + /// fsync before acknowledging a write operation. + pub sync: bool, +} + +impl WriteOptions { + pub fn new() -> Self { + WriteOptions { sync: false } + } +} + +impl Default for WriteOptions { + fn default() -> Self { + Self::new() + } +} diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs new file mode 100644 index 0000000000..3d8bbe1473 --- /dev/null +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -0,0 +1,304 @@ +use crate::hot_cold_store::{BytesKey, HotColdDBError}; +use crate::Key; +use crate::{ + get_key_for_col, metrics, ColumnIter, ColumnKeyIter, DBColumn, Error, KeyValueStoreOp, +}; +use leveldb::{ + compaction::Compaction, + database::{ + batch::{Batch, Writebatch}, + kv::KV, + Database, + }, + iterator::{Iterable, LevelDBIterator}, + options::{Options, ReadOptions}, +}; +use parking_lot::{Mutex, MutexGuard}; +use std::collections::HashSet; +use std::marker::PhantomData; +use std::path::Path; +use types::{EthSpec, FixedBytesExtended, Hash256}; + +use super::interface::WriteOptions; + +pub struct LevelDB { + db: Database, + /// A mutex to synchronise sensitive read-write transactions. 
+ transaction_mutex: Mutex<()>, + _phantom: PhantomData, +} + +impl From for leveldb::options::WriteOptions { + fn from(options: WriteOptions) -> Self { + let mut opts = leveldb::options::WriteOptions::new(); + opts.sync = options.sync; + opts + } +} + +impl LevelDB { + pub fn open(path: &Path) -> Result { + let mut options = Options::new(); + + options.create_if_missing = true; + + let db = Database::open(path, options)?; + let transaction_mutex = Mutex::new(()); + + Ok(Self { + db, + transaction_mutex, + _phantom: PhantomData, + }) + } + + pub fn read_options(&self) -> ReadOptions { + ReadOptions::new() + } + + pub fn write_options(&self) -> WriteOptions { + WriteOptions::new() + } + + pub fn write_options_sync(&self) -> WriteOptions { + let mut opts = WriteOptions::new(); + opts.sync = true; + opts + } + + pub fn put_bytes_with_options( + &self, + col: DBColumn, + key: &[u8], + val: &[u8], + opts: WriteOptions, + ) -> Result<(), Error> { + let column_key = get_key_for_col(col, key); + + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[col.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[col.into()], + val.len() as u64, + ); + let timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + + self.db + .put(opts.into(), BytesKey::from_vec(column_key), val) + .map_err(Into::into) + .map(|()| { + metrics::stop_timer(timer); + }) + } + + /// Store some `value` in `column`, indexed with `key`. + pub fn put_bytes(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { + self.put_bytes_with_options(col, key, val, self.write_options()) + } + + pub fn put_bytes_sync(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { + self.put_bytes_with_options(col, key, val, self.write_options_sync()) + } + + pub fn sync(&self) -> Result<(), Error> { + self.put_bytes_sync(DBColumn::Dummy, b"sync", b"sync") + } + + // Retrieve some bytes in `column` with `key`. 
+ pub fn get_bytes(&self, col: DBColumn, key: &[u8]) -> Result>, Error> { + let column_key = get_key_for_col(col, key); + + metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[col.into()]); + let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES); + + self.db + .get(self.read_options(), BytesKey::from_vec(column_key)) + .map_err(Into::into) + .map(|opt| { + opt.inspect(|bytes| { + metrics::inc_counter_vec_by( + &metrics::DISK_DB_READ_BYTES, + &[col.into()], + bytes.len() as u64, + ); + metrics::stop_timer(timer); + }) + }) + } + + /// Return `true` if `key` exists in `column`. + pub fn key_exists(&self, col: DBColumn, key: &[u8]) -> Result { + let column_key = get_key_for_col(col, key); + + metrics::inc_counter_vec(&metrics::DISK_DB_EXISTS_COUNT, &[col.into()]); + + self.db + .get(self.read_options(), BytesKey::from_vec(column_key)) + .map_err(Into::into) + .map(|val| val.is_some()) + } + + /// Removes `key` from `column`. + pub fn key_delete(&self, col: DBColumn, key: &[u8]) -> Result<(), Error> { + let column_key = get_key_for_col(col, key); + + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[col.into()]); + + self.db + .delete(self.write_options().into(), BytesKey::from_vec(column_key)) + .map_err(Into::into) + } + + pub fn do_atomically(&self, ops_batch: Vec) -> Result<(), Error> { + let mut leveldb_batch = Writebatch::new(); + for op in ops_batch { + match op { + KeyValueStoreOp::PutKeyValue(col, key, value) => { + let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[col.into()], + value.len() as u64, + ); + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[col.into()]); + let column_key = get_key_for_col(col, &key); + leveldb_batch.put(BytesKey::from_vec(column_key), &value); + } + + KeyValueStoreOp::DeleteKey(col, key) => { + let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); + 
metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[col.into()]); + let column_key = get_key_for_col(col, &key); + leveldb_batch.delete(BytesKey::from_vec(column_key)); + } + } + } + self.db.write(self.write_options().into(), &leveldb_batch)?; + Ok(()) + } + + pub fn begin_rw_transaction(&self) -> MutexGuard<()> { + self.transaction_mutex.lock() + } + + /// Compact all values in the states and states flag columns. + pub fn compact(&self) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::DISK_DB_COMPACT_TIMES); + let endpoints = |column: DBColumn| { + ( + BytesKey::from_vec(get_key_for_col(column, Hash256::zero().as_slice())), + BytesKey::from_vec(get_key_for_col( + column, + Hash256::repeat_byte(0xff).as_slice(), + )), + ) + }; + + for (start_key, end_key) in [ + endpoints(DBColumn::BeaconStateTemporary), + endpoints(DBColumn::BeaconState), + endpoints(DBColumn::BeaconStateSummary), + ] { + self.db.compact(&start_key, &end_key); + } + + Ok(()) + } + + pub fn compact_column(&self, column: DBColumn) -> Result<(), Error> { + // Use key-size-agnostic keys [] and 0xff..ff with a minimum of 32 bytes to account for + // columns that may change size between sub-databases or schema versions. 
+ let start_key = BytesKey::from_vec(get_key_for_col(column, &[])); + let end_key = BytesKey::from_vec(get_key_for_col( + column, + &vec![0xff; std::cmp::max(column.key_size(), 32)], + )); + self.db.compact(&start_key, &end_key); + Ok(()) + } + + pub fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + let start_key = BytesKey::from_vec(get_key_for_col(column, from)); + let iter = self.db.iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |(key, _)| key.matches_column(column)) + .map(move |(bytes_key, value)| { + metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[column.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_READ_BYTES, + &[column.into()], + value.len() as u64, + ); + let key = bytes_key.remove_column_variable(column).ok_or_else(|| { + HotColdDBError::IterationError { + unexpected_key: bytes_key.clone(), + } + })?; + Ok((K::from_bytes(key)?, value)) + }), + ) + } + + pub fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter { + let start_key = BytesKey::from_vec(get_key_for_col(column, from)); + + let iter = self.db.keys_iter(self.read_options()); + iter.seek(&start_key); + + Box::new( + iter.take_while(move |key| key.matches_column(column)) + .map(move |bytes_key| { + metrics::inc_counter_vec(&metrics::DISK_DB_KEY_READ_COUNT, &[column.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_KEY_READ_BYTES, + &[column.into()], + bytes_key.key.len() as u64, + ); + let key = &bytes_key.key[column.as_bytes().len()..]; + K::from_bytes(key) + }), + ) + } + + /// Iterate through all keys and values in a particular column. 
+ pub fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + self.iter_column_keys_from(column, &vec![0; column.key_size()]) + } + + pub fn iter_column(&self, column: DBColumn) -> ColumnIter { + self.iter_column_from(column, &vec![0; column.key_size()]) + } + + pub fn delete_batch(&self, col: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error> { + let mut leveldb_batch = Writebatch::new(); + for op in ops { + let column_key = get_key_for_col(col, op); + leveldb_batch.delete(BytesKey::from_vec(column_key)); + } + self.db.write(self.write_options().into(), &leveldb_batch)?; + Ok(()) + } + + pub fn delete_if( + &self, + column: DBColumn, + mut f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error> { + let mut leveldb_batch = Writebatch::new(); + let iter = self.db.iter(self.read_options()); + + iter.take_while(move |(key, _)| key.matches_column(column)) + .for_each(|(key, value)| { + if f(&value).unwrap_or(false) { + let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[column.into()]); + leveldb_batch.delete(key); + } + }); + + self.db.write(self.write_options().into(), &leveldb_batch)?; + Ok(()) + } +} diff --git a/beacon_node/store/src/database/redb_impl.rs b/beacon_node/store/src/database/redb_impl.rs new file mode 100644 index 0000000000..cbe575d184 --- /dev/null +++ b/beacon_node/store/src/database/redb_impl.rs @@ -0,0 +1,319 @@ +use crate::{metrics, ColumnIter, ColumnKeyIter, Key}; +use crate::{DBColumn, Error, KeyValueStoreOp}; +use parking_lot::{Mutex, MutexGuard, RwLock}; +use redb::TableDefinition; +use std::collections::HashSet; +use std::{borrow::BorrowMut, marker::PhantomData, path::Path}; +use strum::IntoEnumIterator; +use types::EthSpec; + +use super::interface::WriteOptions; + +pub const DB_FILE_NAME: &str = "database.redb"; + +pub struct Redb { + db: RwLock, + transaction_mutex: Mutex<()>, + _phantom: PhantomData, +} + +impl From for redb::Durability { + fn 
from(options: WriteOptions) -> Self { + if options.sync { + redb::Durability::Immediate + } else { + redb::Durability::Eventual + } + } +} + +impl Redb { + pub fn open(path: &Path) -> Result { + let db_file = path.join(DB_FILE_NAME); + let db = redb::Database::create(db_file)?; + let transaction_mutex = Mutex::new(()); + + for column in DBColumn::iter() { + Redb::::create_table(&db, column.into())?; + } + + Ok(Self { + db: db.into(), + transaction_mutex, + _phantom: PhantomData, + }) + } + + fn create_table(db: &redb::Database, table_name: &str) -> Result<(), Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(table_name); + let tx = db.begin_write()?; + tx.open_table(table_definition)?; + tx.commit().map_err(Into::into) + } + + pub fn write_options(&self) -> WriteOptions { + WriteOptions::new() + } + + pub fn write_options_sync(&self) -> WriteOptions { + let mut opts = WriteOptions::new(); + opts.sync = true; + opts + } + + pub fn begin_rw_transaction(&self) -> MutexGuard<()> { + self.transaction_mutex.lock() + } + + pub fn put_bytes_with_options( + &self, + col: DBColumn, + key: &[u8], + val: &[u8], + opts: WriteOptions, + ) -> Result<(), Error> { + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[col.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[col.into()], + val.len() as u64, + ); + let timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + let open_db = self.db.read(); + let mut tx = open_db.begin_write()?; + tx.set_durability(opts.into()); + let mut table = tx.open_table(table_definition)?; + + table.insert(key, val).map(|_| { + metrics::stop_timer(timer); + })?; + drop(table); + tx.commit().map_err(Into::into) + } + + /// Store some `value` in `column`, indexed with `key`. 
+ pub fn put_bytes(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { + self.put_bytes_with_options(col, key, val, self.write_options()) + } + + pub fn put_bytes_sync(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { + self.put_bytes_with_options(col, key, val, self.write_options_sync()) + } + + pub fn sync(&self) -> Result<(), Error> { + self.put_bytes_sync(DBColumn::Dummy, b"sync", b"sync") + } + + // Retrieve some bytes in `column` with `key`. + pub fn get_bytes(&self, col: DBColumn, key: &[u8]) -> Result>, Error> { + metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[col.into()]); + let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + let open_db = self.db.read(); + let tx = open_db.begin_read()?; + let table = tx.open_table(table_definition)?; + + let result = table.get(key)?; + + match result { + Some(access_guard) => { + let value = access_guard.value().to_vec(); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_READ_BYTES, + &[col.into()], + value.len() as u64, + ); + metrics::stop_timer(timer); + Ok(Some(value)) + } + None => { + metrics::stop_timer(timer); + Ok(None) + } + } + } + + /// Return `true` if `key` exists in `column`. + pub fn key_exists(&self, col: DBColumn, key: &[u8]) -> Result { + metrics::inc_counter_vec(&metrics::DISK_DB_EXISTS_COUNT, &[col.into()]); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + let open_db = self.db.read(); + let tx = open_db.begin_read()?; + let table = tx.open_table(table_definition)?; + + table + .get(key) + .map_err(Into::into) + .map(|access_guard| access_guard.is_some()) + } + + /// Removes `key` from `column`. 
+ pub fn key_delete(&self, col: DBColumn, key: &[u8]) -> Result<(), Error> { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + let open_db = self.db.read(); + let tx = open_db.begin_write()?; + let mut table = tx.open_table(table_definition)?; + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[col.into()]); + + table.remove(key).map(|_| ())?; + drop(table); + tx.commit().map_err(Into::into) + } + + pub fn do_atomically(&self, ops_batch: Vec) -> Result<(), Error> { + let open_db = self.db.read(); + let mut tx = open_db.begin_write()?; + tx.set_durability(self.write_options().into()); + for op in ops_batch { + match op { + KeyValueStoreOp::PutKeyValue(column, key, value) => { + let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[column.into()], + value.len() as u64, + ); + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[column.into()]); + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let mut table = tx.open_table(table_definition)?; + table.insert(key.as_slice(), value.as_slice())?; + drop(table); + } + + KeyValueStoreOp::DeleteKey(column, key) => { + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[column.into()]); + let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let mut table = tx.open_table(table_definition)?; + table.remove(key.as_slice())?; + drop(table); + } + } + } + + tx.commit()?; + Ok(()) + } + + /// Compact all values in the states and states flag columns. 
+ pub fn compact(&self) -> Result<(), Error> { + let _timer = metrics::start_timer(&metrics::DISK_DB_COMPACT_TIMES); + let mut open_db = self.db.write(); + let mut_db = open_db.borrow_mut(); + mut_db.compact().map_err(Into::into).map(|_| ()) + } + + pub fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let result = (|| { + let open_db = self.db.read(); + let read_txn = open_db.begin_read()?; + let table = read_txn.open_table(table_definition)?; + let range = table.range(from..)?; + Ok(range.map(move |res| { + let (key, _) = res?; + metrics::inc_counter_vec(&metrics::DISK_DB_KEY_READ_COUNT, &[column.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_KEY_READ_BYTES, + &[column.into()], + key.value().len() as u64, + ); + K::from_bytes(key.value()) + })) + })(); + + match result { + Ok(iter) => Box::new(iter), + Err(err) => Box::new(std::iter::once(Err(err))), + } + } + + /// Iterate through all keys and values in a particular column. 
+ pub fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { + self.iter_column_keys_from(column, &vec![0; column.key_size()]) + } + + pub fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let result = (|| { + let open_db = self.db.read(); + let read_txn = open_db.begin_read()?; + let table = read_txn.open_table(table_definition)?; + let range = table.range(from..)?; + + Ok(range + .take_while(move |res| match res.as_ref() { + Ok((_, _)) => true, + Err(_) => false, + }) + .map(move |res| { + let (key, value) = res?; + metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[column.into()]); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_READ_BYTES, + &[column.into()], + value.value().len() as u64, + ); + Ok((K::from_bytes(key.value())?, value.value().to_vec())) + })) + })(); + + match result { + Ok(iter) => Box::new(iter), + Err(err) => Box::new(std::iter::once(Err(err))), + } + } + + pub fn iter_column(&self, column: DBColumn) -> ColumnIter { + self.iter_column_from(column, &vec![0; column.key_size()]) + } + + pub fn delete_batch(&self, col: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error> { + let open_db = self.db.read(); + let mut tx = open_db.begin_write()?; + + tx.set_durability(redb::Durability::None); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col.into()); + + let mut table = tx.open_table(table_definition)?; + table.retain(|key, _| !ops.contains(key))?; + + drop(table); + tx.commit()?; + Ok(()) + } + + pub fn delete_if( + &self, + column: DBColumn, + mut f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error> { + let open_db = self.db.read(); + let mut tx = open_db.begin_write()?; + + tx.set_durability(redb::Durability::None); + + let table_definition: TableDefinition<'_, &[u8], &[u8]> = + TableDefinition::new(column.into()); + + let mut table = 
tx.open_table(table_definition)?; + table.retain(|_, value| !f(value).unwrap_or(false))?; + + drop(table); + tx.commit()?; + Ok(()) + } +} diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 6bb4edee6b..41fd17ef43 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -2,6 +2,8 @@ use crate::chunked_vector::ChunkError; use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use crate::{hdiff, DBColumn}; +#[cfg(feature = "leveldb")] +use leveldb::error::Error as LevelDBError; use ssz::DecodeError; use state_processing::BlockReplayError; use types::{milhouse, BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; @@ -48,6 +50,16 @@ pub enum Error { MissingGenesisState, MissingSnapshot(Slot), BlockReplayError(BlockReplayError), + AddPayloadLogicError, + InvalidKey, + InvalidBytes, + InconsistentFork(InconsistentFork), + #[cfg(feature = "leveldb")] + LevelDbError(LevelDBError), + #[cfg(feature = "redb")] + RedbError(redb::Error), + CacheBuildError(EpochCacheError), + RandaoMixOutOfBounds, MilhouseError(milhouse::Error), Compression(std::io::Error), FinalizedStateDecreasingSlot, @@ -56,17 +68,11 @@ pub enum Error { state_root: Hash256, slot: Slot, }, - AddPayloadLogicError, - InvalidKey, - InvalidBytes, - InconsistentFork(InconsistentFork), Hdiff(hdiff::Error), - CacheBuildError(EpochCacheError), ForwardsIterInvalidColumn(DBColumn), ForwardsIterGap(DBColumn, Slot, Slot), StateShouldNotBeRequired(Slot), MissingBlock(Hash256), - RandaoMixOutOfBounds, GenesisStateUnknown, ArithError(safe_arith::ArithError), } @@ -145,6 +151,62 @@ impl From for Error { } } +#[cfg(feature = "leveldb")] +impl From for Error { + fn from(e: LevelDBError) -> Error { + Error::LevelDbError(e) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::Error) -> Self { + Error::RedbError(e) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::TableError) -> 
Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::TransactionError) -> Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::DatabaseError) -> Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::StorageError) -> Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::CommitError) -> Self { + Error::RedbError(e.into()) + } +} + +#[cfg(feature = "redb")] +impl From for Error { + fn from(e: redb::CompactionError) -> Self { + Error::RedbError(e.into()) + } +} + impl From for Error { fn from(e: EpochCacheError) -> Error { Error::CacheBuildError(e) diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 955bd33b30..255b7d8eac 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -4,7 +4,6 @@ use crate::{ColumnIter, DBColumn, HotColdDB, ItemStore}; use itertools::process_results; use std::marker::PhantomData; use types::{BeaconState, EthSpec, Hash256, Slot}; - pub type HybridForwardsBlockRootsIterator<'a, E, Hot, Cold> = HybridForwardsIterator<'a, E, Hot, Cold>; pub type HybridForwardsStateRootsIterator<'a, E, Hot, Cold> = @@ -159,6 +158,7 @@ impl, Cold: ItemStore> Iterator return None; } self.inner + .as_mut() .next()? .and_then(|(slot_bytes, root_bytes)| { let slot = slot_bytes diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index 5f8ed8f5e7..06393f2d21 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -1,10 +1,11 @@ //! Garbage collection process that runs at start-up to clean up the database. 
+use crate::database::interface::BeaconNodeBackend; use crate::hot_cold_store::HotColdDB; -use crate::{Error, LevelDB, StoreOp}; +use crate::{DBColumn, Error}; use slog::debug; use types::EthSpec; -impl HotColdDB, LevelDB> +impl HotColdDB, BeaconNodeBackend> where E: EthSpec, { @@ -16,21 +17,22 @@ where /// Delete the temporary states that were leftover by failed block imports. pub fn delete_temp_states(&self) -> Result<(), Error> { - let delete_ops = - self.iter_temporary_state_roots() - .try_fold(vec![], |mut ops, state_root| { - let state_root = state_root?; - ops.push(StoreOp::DeleteState(state_root, None)); - Result::<_, Error>::Ok(ops) - })?; - - if !delete_ops.is_empty() { + let mut ops = vec![]; + self.iter_temporary_state_roots().for_each(|state_root| { + if let Ok(state_root) = state_root { + ops.push(state_root); + } + }); + if !ops.is_empty() { debug!( self.log, "Garbage collecting {} temporary states", - delete_ops.len() + ops.len() ); - self.do_atomically_with_block_and_blobs_cache(delete_ops)?; + + self.delete_batch(DBColumn::BeaconState, ops.clone())?; + self.delete_batch(DBColumn::BeaconStateSummary, ops.clone())?; + self.delete_batch(DBColumn::BeaconStateTemporary, ops)?; } Ok(()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c29305f983..e4a857b799 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1,10 +1,10 @@ use crate::config::{OnDiskStoreConfig, StoreConfig}; +use crate::database::interface::BeaconNodeBackend; use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator}; use crate::hdiff::{HDiff, HDiffBuffer, HierarchyModuli, StorageStrategy}; use crate::historic_state_cache::HistoricStateCache; use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::iter::{BlockRootsIterator, ParentRootBlockIterator, RootsIterator}; -use crate::leveldb_store::{BytesKey, LevelDB}; use 
crate::memory_store::MemoryStore; use crate::metadata::{ AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnInfo, PruningCheckpoint, SchemaVersion, @@ -14,12 +14,10 @@ use crate::metadata::{ }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, get_key_for_col, BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, - ItemStore, KeyValueStoreOp, StoreItem, StoreOp, + get_data_column_key, metrics, parse_data_column_key, BlobSidecarListFromRoot, ColumnKeyIter, + DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; -use crate::{metrics, parse_data_column_key}; use itertools::{process_results, Itertools}; -use leveldb::iterator::LevelDBIterator; use lru::LruCache; use parking_lot::{Mutex, RwLock}; use safe_arith::SafeArith; @@ -121,6 +119,11 @@ impl BlockCache { pub fn get_blobs<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a BlobSidecarList> { self.blob_cache.get(block_root) } + pub fn get_data_columns(&mut self, block_root: &Hash256) -> Option> { + self.data_column_cache + .get(block_root) + .map(|map| map.values().cloned().collect::>()) + } pub fn get_data_column<'a>( &'a mut self, block_root: &Hash256, @@ -231,7 +234,7 @@ impl HotColdDB, MemoryStore> { } } -impl HotColdDB, LevelDB> { +impl HotColdDB, BeaconNodeBackend> { /// Open a new or existing database, with the given paths to the hot and cold DBs. 
/// /// The `migrate_schema` function is passed in so that the parent `BeaconChain` can provide @@ -249,7 +252,7 @@ impl HotColdDB, LevelDB> { let hierarchy = config.hierarchy_config.to_moduli()?; - let hot_db = LevelDB::open(hot_path)?; + let hot_db = BeaconNodeBackend::open(&config, hot_path)?; let anchor_info = RwLock::new(Self::load_anchor_info(&hot_db)?); let db = HotColdDB { @@ -257,8 +260,8 @@ impl HotColdDB, LevelDB> { anchor_info, blob_info: RwLock::new(BlobInfo::default()), data_column_info: RwLock::new(DataColumnInfo::default()), - cold_db: LevelDB::open(cold_path)?, - blobs_db: LevelDB::open(blobs_db_path)?, + blobs_db: BeaconNodeBackend::open(&config, blobs_db_path)?, + cold_db: BeaconNodeBackend::open(&config, cold_path)?, hot_db, block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(StateCache::new(config.state_cache_size)), @@ -324,16 +327,15 @@ impl HotColdDB, LevelDB> { db.compare_and_set_blob_info_with_write(<_>::default(), new_blob_info.clone())?; let data_column_info = db.load_data_column_info()?; - let eip7594_fork_slot = db + let fulu_fork_slot = db .spec - .eip7594_fork_epoch + .fulu_fork_epoch .map(|epoch| epoch.start_slot(E::slots_per_epoch())); let new_data_column_info = match &data_column_info { Some(data_column_info) => { // Set the oldest data column slot to the fork slot if it is not yet set. - let oldest_data_column_slot = data_column_info - .oldest_data_column_slot - .or(eip7594_fork_slot); + let oldest_data_column_slot = + data_column_info.oldest_data_column_slot.or(fulu_fork_slot); DataColumnInfo { oldest_data_column_slot, } @@ -341,7 +343,7 @@ impl HotColdDB, LevelDB> { // First start. None => DataColumnInfo { // Set the oldest data column slot to the fork slot if it is not yet set. 
- oldest_data_column_slot: eip7594_fork_slot, + oldest_data_column_slot: fulu_fork_slot, }, }; db.compare_and_set_data_column_info_with_write( @@ -407,24 +409,9 @@ impl HotColdDB, LevelDB> { } /// Return an iterator over the state roots of all temporary states. - pub fn iter_temporary_state_roots(&self) -> impl Iterator> + '_ { - let column = DBColumn::BeaconStateTemporary; - let start_key = - BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_slice())); - - let keys_iter = self.hot_db.keys_iter(); - keys_iter.seek(&start_key); - - keys_iter - .take_while(move |key| key.matches_column(column)) - .map(move |bytes_key| { - bytes_key.remove_column(column).ok_or_else(|| { - HotColdDBError::IterationError { - unexpected_key: bytes_key, - } - .into() - }) - }) + pub fn iter_temporary_state_roots(&self) -> ColumnKeyIter { + self.hot_db + .iter_column_keys::(DBColumn::BeaconStateTemporary) } } @@ -529,16 +516,16 @@ impl, Cold: ItemStore> HotColdDB .ok_or(Error::AddPayloadLogicError) } - /// Prepare a signed beacon block for storage in the datbase *without* its payload. + /// Prepare a signed beacon block for storage in the database *without* its payload. pub fn blinded_block_as_kv_store_ops( &self, key: &Hash256, blinded_block: &SignedBeaconBlock>, ops: &mut Vec, ) { - let db_key = get_key_for_col(DBColumn::BeaconBlock.into(), key.as_slice()); ops.push(KeyValueStoreOp::PutKeyValue( - db_key, + DBColumn::BeaconBlock, + key.as_slice().into(), blinded_block.as_ssz_bytes(), )); } @@ -660,7 +647,7 @@ impl, Cold: ItemStore> HotColdDB decoder: impl FnOnce(&[u8]) -> Result, ssz::DecodeError>, ) -> Result>, Error> { self.hot_db - .get_bytes(DBColumn::BeaconBlock.into(), block_root.as_slice())? + .get_bytes(DBColumn::BeaconBlock, block_root.as_slice())? 
.map(|block_bytes| decoder(&block_bytes)) .transpose() .map_err(|e| e.into()) @@ -673,11 +660,15 @@ impl, Cold: ItemStore> HotColdDB block_root: &Hash256, fork_name: ForkName, ) -> Result>, Error> { - let column = ExecutionPayload::::db_column().into(); let key = block_root.as_slice(); - match self.hot_db.get_bytes(column, key)? { - Some(bytes) => Ok(Some(ExecutionPayload::from_ssz_bytes(&bytes, fork_name)?)), + match self + .hot_db + .get_bytes(ExecutionPayload::::db_column(), key)? + { + Some(bytes) => Ok(Some(ExecutionPayload::from_ssz_bytes_by_fork( + &bytes, fork_name, + )?)), None => Ok(None), } } @@ -705,10 +696,7 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Error> { let column = DBColumn::SyncCommitteeBranch; - if let Some(bytes) = self - .hot_db - .get_bytes(column.into(), &block_root.as_ssz_bytes())? - { + if let Some(bytes) = self.hot_db.get_bytes(column, &block_root.as_ssz_bytes())? { let sync_committee_branch = Vec::::from_ssz_bytes(&bytes)?; return Ok(Some(sync_committee_branch)); } @@ -725,7 +713,7 @@ impl, Cold: ItemStore> HotColdDB if let Some(bytes) = self .hot_db - .get_bytes(column.into(), &sync_committee_period.as_ssz_bytes())? + .get_bytes(column, &sync_committee_period.as_ssz_bytes())? 
{ let sync_committee: SyncCommittee = SyncCommittee::from_ssz_bytes(&bytes)?; return Ok(Some(sync_committee)); @@ -741,7 +729,7 @@ impl, Cold: ItemStore> HotColdDB ) -> Result<(), Error> { let column = DBColumn::SyncCommitteeBranch; self.hot_db.put_bytes( - column.into(), + column, &block_root.as_ssz_bytes(), &sync_committee_branch.as_ssz_bytes(), )?; @@ -755,7 +743,7 @@ impl, Cold: ItemStore> HotColdDB ) -> Result<(), Error> { let column = DBColumn::SyncCommittee; self.hot_db.put_bytes( - column.into(), + column, &sync_committee_period.to_le_bytes(), &sync_committee.as_ssz_bytes(), )?; @@ -767,10 +755,10 @@ impl, Cold: ItemStore> HotColdDB &self, sync_committee_period: u64, ) -> Result>, Error> { - let column = DBColumn::LightClientUpdate; - let res = self - .hot_db - .get_bytes(column.into(), &sync_committee_period.to_le_bytes())?; + let res = self.hot_db.get_bytes( + DBColumn::LightClientUpdate, + &sync_committee_period.to_le_bytes(), + )?; if let Some(light_client_update_bytes) = res { let epoch = sync_committee_period @@ -822,10 +810,8 @@ impl, Cold: ItemStore> HotColdDB sync_committee_period: u64, light_client_update: &LightClientUpdate, ) -> Result<(), Error> { - let column = DBColumn::LightClientUpdate; - self.hot_db.put_bytes( - column.into(), + DBColumn::LightClientUpdate, &sync_committee_period.to_le_bytes(), &light_client_update.as_ssz_bytes(), )?; @@ -836,29 +822,29 @@ impl, Cold: ItemStore> HotColdDB /// Check if the blobs for a block exists on disk. pub fn blobs_exist(&self, block_root: &Hash256) -> Result { self.blobs_db - .key_exists(DBColumn::BeaconBlob.into(), block_root.as_slice()) + .key_exists(DBColumn::BeaconBlob, block_root.as_slice()) } /// Determine whether a block exists in the database. 
pub fn block_exists(&self, block_root: &Hash256) -> Result { self.hot_db - .key_exists(DBColumn::BeaconBlock.into(), block_root.as_slice()) + .key_exists(DBColumn::BeaconBlock, block_root.as_slice()) } /// Delete a block from the store and the block cache. pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { self.block_cache.lock().delete(block_root); self.hot_db - .key_delete(DBColumn::BeaconBlock.into(), block_root.as_slice())?; + .key_delete(DBColumn::BeaconBlock, block_root.as_slice())?; self.hot_db - .key_delete(DBColumn::ExecPayload.into(), block_root.as_slice())?; + .key_delete(DBColumn::ExecPayload, block_root.as_slice())?; self.blobs_db - .key_delete(DBColumn::BeaconBlob.into(), block_root.as_slice()) + .key_delete(DBColumn::BeaconBlob, block_root.as_slice()) } pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobSidecarList) -> Result<(), Error> { self.blobs_db.put_bytes( - DBColumn::BeaconBlob.into(), + DBColumn::BeaconBlob, block_root.as_slice(), &blobs.as_ssz_bytes(), )?; @@ -872,8 +858,29 @@ impl, Cold: ItemStore> HotColdDB blobs: BlobSidecarList, ops: &mut Vec, ) { - let db_key = get_key_for_col(DBColumn::BeaconBlob.into(), key.as_slice()); - ops.push(KeyValueStoreOp::PutKeyValue(db_key, blobs.as_ssz_bytes())); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconBlob, + key.as_slice().to_vec(), + blobs.as_ssz_bytes(), + )); + } + + pub fn put_data_columns( + &self, + block_root: &Hash256, + data_columns: DataColumnSidecarList, + ) -> Result<(), Error> { + for data_column in data_columns { + self.blobs_db.put_bytes( + DBColumn::BeaconDataColumn, + &get_data_column_key(block_root, &data_column.index), + &data_column.as_ssz_bytes(), + )?; + self.block_cache + .lock() + .put_data_column(*block_root, data_column); + } + Ok(()) } pub fn data_columns_as_kv_store_ops( @@ -883,12 +890,9 @@ impl, Cold: ItemStore> HotColdDB ops: &mut Vec, ) { for data_column in data_columns { - let db_key = get_key_for_col( - 
DBColumn::BeaconDataColumn.into(), - &get_data_column_key(block_root, &data_column.index), - ); ops.push(KeyValueStoreOp::PutKeyValue( - db_key, + DBColumn::BeaconDataColumn, + get_data_column_key(block_root, &data_column.index), data_column.as_ssz_bytes(), )); } @@ -1202,63 +1206,68 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::DeleteStateTemporaryFlag(state_root) => { - let db_key = - get_key_for_col(TemporaryFlag::db_column().into(), state_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(db_key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + TemporaryFlag::db_column(), + state_root.as_slice().to_vec(), + )); } StoreOp::DeleteBlock(block_root) => { - let key = get_key_for_col(DBColumn::BeaconBlock.into(), block_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconBlock, + block_root.as_slice().to_vec(), + )); } StoreOp::DeleteBlobs(block_root) => { - let key = get_key_for_col(DBColumn::BeaconBlob.into(), block_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconBlob, + block_root.as_slice().to_vec(), + )); } StoreOp::DeleteDataColumns(block_root, column_indices) => { for index in column_indices { - let key = get_key_for_col( - DBColumn::BeaconDataColumn.into(), - &get_data_column_key(&block_root, &index), - ); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + let key = get_data_column_key(&block_root, &index); + key_value_batch + .push(KeyValueStoreOp::DeleteKey(DBColumn::BeaconDataColumn, key)); } } StoreOp::DeleteState(state_root, slot) => { // Delete the hot state summary. 
- let state_summary_key = - get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(state_summary_key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateSummary, + state_root.as_slice().to_vec(), + )); // Delete the state temporary flag (if any). Temporary flags are commonly // created by the state advance routine. - let state_temp_key = get_key_for_col( - DBColumn::BeaconStateTemporary.into(), - state_root.as_slice(), - ); - key_value_batch.push(KeyValueStoreOp::DeleteKey(state_temp_key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateTemporary, + state_root.as_slice().to_vec(), + )); if slot.map_or(true, |slot| slot % E::slots_per_epoch() == 0) { - let state_key = - get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(state_key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconState, + state_root.as_slice().to_vec(), + )); } } StoreOp::DeleteExecutionPayload(block_root) => { - let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_slice()); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::ExecPayload, + block_root.as_slice().to_vec(), + )); } StoreOp::DeleteSyncCommitteeBranch(block_root) => { - let key = get_key_for_col( - DBColumn::SyncCommitteeBranch.into(), - block_root.as_slice(), - ); - key_value_batch.push(KeyValueStoreOp::DeleteKey(key)); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + DBColumn::SyncCommitteeBranch, + block_root.as_slice().to_vec(), + )); } StoreOp::KeyValueOp(kv_op) => { @@ -1269,6 +1278,19 @@ impl, Cold: ItemStore> HotColdDB Ok(key_value_batch) } + pub fn delete_batch(&self, col: DBColumn, ops: Vec) -> Result<(), Error> { + let new_ops: HashSet<&[u8]> = ops.iter().map(|v| v.as_slice()).collect(); + self.hot_db.delete_batch(col, new_ops) 
+ } + + pub fn delete_if( + &self, + column: DBColumn, + f: impl Fn(&[u8]) -> Result, + ) -> Result<(), Error> { + self.hot_db.delete_if(column, f) + } + pub fn do_atomically_with_block_and_blobs_cache( &self, batch: Vec>, @@ -1608,10 +1630,8 @@ impl, Cold: ItemStore> HotColdDB ) -> Result<(), Error> { ops.push(ColdStateSummary { slot }.as_kv_store_op(*state_root)); ops.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col( - DBColumn::BeaconStateRoots.into(), - &slot.as_u64().to_be_bytes(), - ), + DBColumn::BeaconStateRoots, + slot.as_u64().to_be_bytes().to_vec(), state_root.as_slice().to_vec(), )); Ok(()) @@ -1678,19 +1698,19 @@ impl, Cold: ItemStore> HotColdDB out }; - let key = get_key_for_col( - DBColumn::BeaconStateSnapshot.into(), - &state.slot().as_u64().to_be_bytes(), - ); - ops.push(KeyValueStoreOp::PutKeyValue(key, compressed_value)); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconStateSnapshot, + state.slot().as_u64().to_be_bytes().to_vec(), + compressed_value, + )); Ok(()) } fn load_cold_state_bytes_as_snapshot(&self, slot: Slot) -> Result>, Error> { - match self.cold_db.get_bytes( - DBColumn::BeaconStateSnapshot.into(), - &slot.as_u64().to_be_bytes(), - )? { + match self + .cold_db + .get_bytes(DBColumn::BeaconStateSnapshot, &slot.as_u64().to_be_bytes())? 
+ { Some(bytes) => { let _timer = metrics::start_timer(&metrics::STORE_BEACON_STATE_FREEZER_DECOMPRESS_TIME); @@ -1731,11 +1751,11 @@ impl, Cold: ItemStore> HotColdDB }; let diff_bytes = diff.as_ssz_bytes(); - let key = get_key_for_col( - DBColumn::BeaconStateDiff.into(), - &state.slot().as_u64().to_be_bytes(), - ); - ops.push(KeyValueStoreOp::PutKeyValue(key, diff_bytes)); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconStateDiff, + state.slot().as_u64().to_be_bytes().to_vec(), + diff_bytes, + )); Ok(()) } @@ -1858,10 +1878,7 @@ impl, Cold: ItemStore> HotColdDB let bytes = { let _t = metrics::start_timer(&metrics::BEACON_HDIFF_READ_TIMES); self.cold_db - .get_bytes( - DBColumn::BeaconStateDiff.into(), - &slot.as_u64().to_be_bytes(), - )? + .get_bytes(DBColumn::BeaconStateDiff, &slot.as_u64().to_be_bytes())? .ok_or(HotColdDBError::MissingHDiff(slot))? }; let hdiff = { @@ -2044,6 +2061,40 @@ impl, Cold: ItemStore> HotColdDB }) } + /// Fetch columns for a given block from the store. + pub fn get_data_columns( + &self, + block_root: &Hash256, + ) -> Result>, Error> { + if let Some(columns) = self.block_cache.lock().get_data_columns(block_root) { + metrics::inc_counter(&metrics::BEACON_DATA_COLUMNS_CACHE_HIT_COUNT); + return Ok(Some(columns)); + } + + let columns = self + .blobs_db + .iter_column_from::>(DBColumn::BeaconDataColumn, block_root.as_slice()) + .take_while(|res| { + res.as_ref() + .is_ok_and(|(key, _)| key.starts_with(block_root.as_slice())) + }) + .map(|result| { + let (_key, value) = result?; + let column = DataColumnSidecar::::from_ssz_bytes(&value).map(Arc::new)?; + self.block_cache + .lock() + .put_data_column(*block_root, column.clone()); + Ok(column) + }) + .collect::, Error>>()?; + + if columns.is_empty() { + Ok(None) + } else { + Ok(Some(columns)) + } + } + /// Fetch blobs for a given block from the store. pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { // Check the cache. 
@@ -2054,7 +2105,7 @@ impl, Cold: ItemStore> HotColdDB match self .blobs_db - .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_slice())? + .get_bytes(DBColumn::BeaconBlob, block_root.as_slice())? { Some(ref blobs_bytes) => { // We insert a VariableList of BlobSidecars into the db, but retrieve @@ -2084,8 +2135,12 @@ impl, Cold: ItemStore> HotColdDB /// Fetch all keys in the data_column column with prefix `block_root` pub fn get_data_column_keys(&self, block_root: Hash256) -> Result, Error> { self.blobs_db - .iter_raw_keys(DBColumn::BeaconDataColumn, block_root.as_slice()) - .map(|key| key.and_then(|key| parse_data_column_key(key).map(|key| key.1))) + .iter_column_from::>(DBColumn::BeaconDataColumn, block_root.as_slice()) + .take_while(|res| { + res.as_ref() + .is_ok_and(|(key, _)| key.starts_with(block_root.as_slice())) + }) + .map(|key| key.and_then(|(key, _)| parse_data_column_key(key).map(|key| key.1))) .collect() } @@ -2106,7 +2161,7 @@ impl, Cold: ItemStore> HotColdDB } match self.blobs_db.get_bytes( - DBColumn::BeaconDataColumn.into(), + DBColumn::BeaconDataColumn, &get_data_column_key(block_root, column_index), )? { Some(ref data_column_bytes) => { @@ -2164,10 +2219,12 @@ impl, Cold: ItemStore> HotColdDB schema_version: SchemaVersion, mut ops: Vec, ) -> Result<(), Error> { - let column = SchemaVersion::db_column().into(); let key = SCHEMA_VERSION_KEY.as_slice(); - let db_key = get_key_for_col(column, key); - let op = KeyValueStoreOp::PutKeyValue(db_key, schema_version.as_store_bytes()); + let op = KeyValueStoreOp::PutKeyValue( + SchemaVersion::db_column(), + key.to_vec(), + schema_version.as_store_bytes(), + ); ops.push(op); self.hot_db.do_atomically(ops) @@ -2278,7 +2335,7 @@ impl, Cold: ItemStore> HotColdDB /// Initialize the `DataColumnInfo` when starting from genesis or a checkpoint. 
pub fn init_data_column_info(&self, anchor_slot: Slot) -> Result { - let oldest_data_column_slot = self.spec.eip7594_fork_epoch.map(|fork_epoch| { + let oldest_data_column_slot = self.spec.fulu_fork_epoch.map(|fork_epoch| { std::cmp::max(anchor_slot, fork_epoch.start_slot(E::slots_per_epoch())) }); let data_column_info = DataColumnInfo { @@ -2589,7 +2646,8 @@ impl, Cold: ItemStore> HotColdDB let mut ops = vec![]; for slot in start_slot.as_u64()..end_slot.as_u64() { ops.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + DBColumn::BeaconBlockRoots, + slot.to_be_bytes().to_vec(), block_root.as_slice().to_vec(), )); } @@ -2811,77 +2869,62 @@ impl, Cold: ItemStore> HotColdDB "data_availability_boundary" => data_availability_boundary, ); - let mut ops = vec![]; - let mut last_pruned_block_root = None; + // We collect block roots of deleted blobs in memory. Even for 10y of blob history this + // vec won't go beyond 1GB. We can probably optimise this out eventually. + let mut removed_block_roots = vec![]; - for res in self.forwards_block_roots_iterator_until(oldest_blob_slot, end_slot, || { - let (_, split_state) = self - .get_advanced_hot_state(split.block_root, split.slot, split.state_root)? - .ok_or(HotColdDBError::MissingSplitState( - split.state_root, - split.slot, - ))?; - - Ok((split_state, split.block_root)) - })? 
{ - let (block_root, slot) = match res { - Ok(tuple) => tuple, - Err(e) => { - warn!( - self.log, - "Stopping blob pruning early"; - "error" => ?e, - ); - break; - } + let remove_blob_if = |blobs_bytes: &[u8]| { + let blobs = Vec::from_ssz_bytes(blobs_bytes)?; + let Some(blob): Option<&Arc>> = blobs.first() else { + return Ok(false); }; - if Some(block_root) != last_pruned_block_root { - if self - .spec - .is_peer_das_enabled_for_epoch(slot.epoch(E::slots_per_epoch())) - { - // data columns - let indices = self.get_data_column_keys(block_root)?; - if !indices.is_empty() { - trace!( - self.log, - "Pruning data columns of block"; - "slot" => slot, - "block_root" => ?block_root, - ); - last_pruned_block_root = Some(block_root); - ops.push(StoreOp::DeleteDataColumns(block_root, indices)); - } - } else if self.blobs_exist(&block_root)? { - trace!( - self.log, - "Pruning blobs of block"; - "slot" => slot, - "block_root" => ?block_root, - ); - last_pruned_block_root = Some(block_root); - ops.push(StoreOp::DeleteBlobs(block_root)); - } - } + if blob.slot() <= end_slot { + // Store the block root so we can delete from the blob cache + removed_block_roots.push(blob.block_root()); + // Delete from the on-disk db + return Ok(true); + }; + Ok(false) + }; - if slot >= end_slot { - break; - } + self.blobs_db + .delete_if(DBColumn::BeaconBlob, remove_blob_if)?; + + if self.spec.is_peer_das_enabled_for_epoch(start_epoch) { + let remove_data_column_if = |blobs_bytes: &[u8]| { + let data_column: DataColumnSidecar = + DataColumnSidecar::from_ssz_bytes(blobs_bytes)?; + + if data_column.slot() <= end_slot { + return Ok(true); + }; + + Ok(false) + }; + + self.blobs_db + .delete_if(DBColumn::BeaconDataColumn, remove_data_column_if)?; } - let blob_lists_pruned = ops.len(); + + // Remove deleted blobs from the cache. 
+ let mut block_cache = self.block_cache.lock(); + for block_root in removed_block_roots { + block_cache.delete_blobs(&block_root); + } + drop(block_cache); + let new_blob_info = BlobInfo { oldest_blob_slot: Some(end_slot + 1), blobs_db: blob_info.blobs_db, }; - let update_blob_info = self.compare_and_set_blob_info(blob_info, new_blob_info)?; - ops.push(StoreOp::KeyValueOp(update_blob_info)); - self.do_atomically_with_block_and_blobs_cache(ops)?; + let op = self.compare_and_set_blob_info(blob_info, new_blob_info)?; + self.do_atomically_with_block_and_blobs_cache(vec![StoreOp::KeyValueOp(op)])?; + debug!( self.log, "Blob pruning complete"; - "blob_lists_pruned" => blob_lists_pruned, ); Ok(()) @@ -2944,10 +2987,7 @@ impl, Cold: ItemStore> HotColdDB for column in columns { for res in self.cold_db.iter_column_keys::>(column) { let key = res?; - cold_ops.push(KeyValueStoreOp::DeleteKey(get_key_for_col( - column.as_str(), - &key, - ))); + cold_ops.push(KeyValueStoreOp::DeleteKey(column, key)); } } let delete_ops = cold_ops.len(); @@ -3085,10 +3125,8 @@ pub fn migrate_database, Cold: ItemStore>( // Store the slot to block root mapping. cold_db_block_ops.push(KeyValueStoreOp::PutKeyValue( - get_key_for_col( - DBColumn::BeaconBlockRoots.into(), - &slot.as_u64().to_be_bytes(), - ), + DBColumn::BeaconBlockRoots, + slot.as_u64().to_be_bytes().to_vec(), block_root.as_slice().to_vec(), )); @@ -3339,3 +3377,57 @@ impl StoreItem for TemporaryFlag { Ok(TemporaryFlag) } } + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct BytesKey { + pub key: Vec, +} + +impl db_key::Key for BytesKey { + fn from_u8(key: &[u8]) -> Self { + Self { key: key.to_vec() } + } + + fn as_slice T>(&self, f: F) -> T { + f(self.key.as_slice()) + } +} + +impl BytesKey { + pub fn starts_with(&self, prefix: &Self) -> bool { + self.key.starts_with(&prefix.key) + } + + /// Return `true` iff this `BytesKey` was created with the given `column`. 
+ pub fn matches_column(&self, column: DBColumn) -> bool { + self.key.starts_with(column.as_bytes()) + } + + /// Remove the column from a key, returning its `Hash256` portion. + pub fn remove_column(&self, column: DBColumn) -> Option { + if self.matches_column(column) { + let subkey = &self.key[column.as_bytes().len()..]; + if subkey.len() == 32 { + return Some(Hash256::from_slice(subkey)); + } + } + None + } + + /// Remove the column from a key. + /// + /// Will return `None` if the value doesn't match the column or has the wrong length. + pub fn remove_column_variable(&self, column: DBColumn) -> Option<&[u8]> { + if self.matches_column(column) { + let subkey = &self.key[column.as_bytes().len()..]; + if subkey.len() == column.key_size() { + return Some(subkey); + } + } + None + } + + pub fn from_vec(key: Vec) -> Self { + Self { key } + } +} diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index 48c289f2b2..fd08e547f1 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -13,8 +13,11 @@ pub fn store_full_state( }; metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as u64); metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); - let key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); - ops.push(KeyValueStoreOp::PutKeyValue(key, bytes)); + ops.push(KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconState, + state_root.as_slice().to_vec(), + bytes, + )); Ok(()) } @@ -25,7 +28,7 @@ pub fn get_full_state, E: EthSpec>( ) -> Result>, Error> { let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); - match db.get_bytes(DBColumn::BeaconState.into(), state_root.as_slice())? { + match db.get_bytes(DBColumn::BeaconState, state_root.as_slice())? 
{ Some(bytes) => { let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES); let container = StorageContainer::from_ssz_bytes(&bytes, spec)?; diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs deleted file mode 100644 index 720afd0f3f..0000000000 --- a/beacon_node/store/src/leveldb_store.rs +++ /dev/null @@ -1,310 +0,0 @@ -use super::*; -use crate::hot_cold_store::HotColdDBError; -use leveldb::compaction::Compaction; -use leveldb::database::batch::{Batch, Writebatch}; -use leveldb::database::kv::KV; -use leveldb::database::Database; -use leveldb::error::Error as LevelDBError; -use leveldb::iterator::{Iterable, KeyIterator, LevelDBIterator}; -use leveldb::options::{Options, ReadOptions, WriteOptions}; -use parking_lot::Mutex; -use std::marker::PhantomData; -use std::path::Path; - -/// A wrapped leveldb database. -pub struct LevelDB { - db: Database, - /// A mutex to synchronise sensitive read-write transactions. - transaction_mutex: Mutex<()>, - _phantom: PhantomData, -} - -impl LevelDB { - /// Open a database at `path`, creating a new database if one does not already exist. 
- pub fn open(path: &Path) -> Result { - let mut options = Options::new(); - - options.create_if_missing = true; - - let db = Database::open(path, options)?; - let transaction_mutex = Mutex::new(()); - - Ok(Self { - db, - transaction_mutex, - _phantom: PhantomData, - }) - } - - fn read_options(&self) -> ReadOptions { - ReadOptions::new() - } - - fn write_options(&self) -> WriteOptions { - WriteOptions::new() - } - - fn write_options_sync(&self) -> WriteOptions { - let mut opts = WriteOptions::new(); - opts.sync = true; - opts - } - - fn put_bytes_with_options( - &self, - col: &str, - key: &[u8], - val: &[u8], - opts: WriteOptions, - ) -> Result<(), Error> { - let column_key = get_key_for_col(col, key); - - metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[col]); - metrics::inc_counter_vec_by(&metrics::DISK_DB_WRITE_BYTES, &[col], val.len() as u64); - let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); - - self.db - .put(opts, BytesKey::from_vec(column_key), val) - .map_err(Into::into) - } - - pub fn keys_iter(&self) -> KeyIterator { - self.db.keys_iter(self.read_options()) - } -} - -impl KeyValueStore for LevelDB { - /// Store some `value` in `column`, indexed with `key`. - fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { - self.put_bytes_with_options(col, key, val, self.write_options()) - } - - fn put_bytes_sync(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { - self.put_bytes_with_options(col, key, val, self.write_options_sync()) - } - - fn sync(&self) -> Result<(), Error> { - self.put_bytes_sync("sync", b"sync", b"sync") - } - - /// Retrieve some bytes in `column` with `key`. 
- fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { - let column_key = get_key_for_col(col, key); - - metrics::inc_counter_vec(&metrics::DISK_DB_READ_COUNT, &[col]); - let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES); - - self.db - .get(self.read_options(), BytesKey::from_vec(column_key)) - .map_err(Into::into) - .map(|opt| { - opt.inspect(|bytes| { - metrics::inc_counter_vec_by( - &metrics::DISK_DB_READ_BYTES, - &[col], - bytes.len() as u64, - ); - metrics::stop_timer(timer); - }) - }) - } - - /// Return `true` if `key` exists in `column`. - fn key_exists(&self, col: &str, key: &[u8]) -> Result { - let column_key = get_key_for_col(col, key); - - metrics::inc_counter_vec(&metrics::DISK_DB_EXISTS_COUNT, &[col]); - - self.db - .get(self.read_options(), BytesKey::from_vec(column_key)) - .map_err(Into::into) - .map(|val| val.is_some()) - } - - /// Removes `key` from `column`. - fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { - let column_key = get_key_for_col(col, key); - - metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[col]); - - self.db - .delete(self.write_options(), BytesKey::from_vec(column_key)) - .map_err(Into::into) - } - - fn do_atomically(&self, ops_batch: Vec) -> Result<(), Error> { - let mut leveldb_batch = Writebatch::new(); - for op in ops_batch { - match op { - KeyValueStoreOp::PutKeyValue(key, value) => { - let col = get_col_from_key(&key).unwrap_or("unknown".to_owned()); - metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[&col]); - metrics::inc_counter_vec_by( - &metrics::DISK_DB_WRITE_BYTES, - &[&col], - value.len() as u64, - ); - - leveldb_batch.put(BytesKey::from_vec(key), &value); - } - - KeyValueStoreOp::DeleteKey(key) => { - let col = get_col_from_key(&key).unwrap_or("unknown".to_owned()); - metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[&col]); - - leveldb_batch.delete(BytesKey::from_vec(key)); - } - } - } - - let _timer = 
metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); - - self.db.write(self.write_options(), &leveldb_batch)?; - Ok(()) - } - - fn begin_rw_transaction(&self) -> MutexGuard<()> { - self.transaction_mutex.lock() - } - - fn compact_column(&self, column: DBColumn) -> Result<(), Error> { - // Use key-size-agnostic keys [] and 0xff..ff with a minimum of 32 bytes to account for - // columns that may change size between sub-databases or schema versions. - let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), &[])); - let end_key = BytesKey::from_vec(get_key_for_col( - column.as_str(), - &vec![0xff; std::cmp::max(column.key_size(), 32)], - )); - self.db.compact(&start_key, &end_key); - Ok(()) - } - - fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { - let start_key = BytesKey::from_vec(get_key_for_col(column.into(), from)); - let iter = self.db.iter(self.read_options()); - iter.seek(&start_key); - - Box::new( - iter.take_while(move |(key, _)| key.matches_column(column)) - .map(move |(bytes_key, value)| { - let key = bytes_key.remove_column_variable(column).ok_or_else(|| { - HotColdDBError::IterationError { - unexpected_key: bytes_key.clone(), - } - })?; - Ok((K::from_bytes(key)?, value)) - }), - ) - } - - fn iter_raw_entries(&self, column: DBColumn, prefix: &[u8]) -> RawEntryIter { - let start_key = BytesKey::from_vec(get_key_for_col(column.into(), prefix)); - - let iter = self.db.iter(self.read_options()); - iter.seek(&start_key); - - Box::new( - iter.take_while(move |(key, _)| key.key.starts_with(start_key.key.as_slice())) - .map(move |(bytes_key, value)| { - let subkey = &bytes_key.key[column.as_bytes().len()..]; - Ok((Vec::from(subkey), value)) - }), - ) - } - - fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter { - let start_key = BytesKey::from_vec(get_key_for_col(column.into(), prefix)); - - let iter = self.db.keys_iter(self.read_options()); - iter.seek(&start_key); - - Box::new( - iter.take_while(move |key| 
key.key.starts_with(start_key.key.as_slice())) - .map(move |bytes_key| { - let subkey = &bytes_key.key[column.as_bytes().len()..]; - Ok(Vec::from(subkey)) - }), - ) - } - - /// Iterate through all keys and values in a particular column. - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { - let start_key = - BytesKey::from_vec(get_key_for_col(column.into(), &vec![0; column.key_size()])); - - let iter = self.db.keys_iter(self.read_options()); - iter.seek(&start_key); - - Box::new( - iter.take_while(move |key| key.matches_column(column)) - .map(move |bytes_key| { - let key = bytes_key.remove_column_variable(column).ok_or_else(|| { - HotColdDBError::IterationError { - unexpected_key: bytes_key.clone(), - } - })?; - K::from_bytes(key) - }), - ) - } -} - -impl ItemStore for LevelDB {} - -/// Used for keying leveldb. -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct BytesKey { - key: Vec, -} - -impl db_key::Key for BytesKey { - fn from_u8(key: &[u8]) -> Self { - Self { key: key.to_vec() } - } - - fn as_slice T>(&self, f: F) -> T { - f(self.key.as_slice()) - } -} - -impl BytesKey { - pub fn starts_with(&self, prefix: &Self) -> bool { - self.key.starts_with(&prefix.key) - } - - /// Return `true` iff this `BytesKey` was created with the given `column`. - pub fn matches_column(&self, column: DBColumn) -> bool { - self.key.starts_with(column.as_bytes()) - } - - /// Remove the column from a 32 byte key, yielding the `Hash256` key. - pub fn remove_column(&self, column: DBColumn) -> Option { - let key = self.remove_column_variable(column)?; - (column.key_size() == 32).then(|| Hash256::from_slice(key)) - } - - /// Remove the column from a key. - /// - /// Will return `None` if the value doesn't match the column or has the wrong length. 
- pub fn remove_column_variable(&self, column: DBColumn) -> Option<&[u8]> { - if self.matches_column(column) { - let subkey = &self.key[column.as_bytes().len()..]; - if subkey.len() == column.key_size() { - return Some(subkey); - } - } - None - } - - pub fn from_vec(key: Vec) -> Self { - Self { key } - } -} - -impl From for Error { - fn from(e: LevelDBError) -> Error { - Error::DBError { - message: format!("{:?}", e), - } - } -} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 1458fa846c..0cfc42ab15 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -19,7 +19,6 @@ pub mod hdiff; pub mod historic_state_cache; pub mod hot_cold_store; mod impls; -mod leveldb_store; mod memory_store; pub mod metadata; pub mod metrics; @@ -27,13 +26,13 @@ pub mod partial_beacon_state; pub mod reconstruct; pub mod state_cache; +pub mod database; pub mod iter; pub use self::blob_sidecar_list_from_root::BlobSidecarListFromRoot; pub use self::config::StoreConfig; pub use self::consensus_context::OnDiskConsensusContext; pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; -pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; pub use crate::metadata::BlobInfo; pub use errors::Error; @@ -41,8 +40,9 @@ pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; pub use metrics::scrape_for_metrics; use parking_lot::MutexGuard; +use std::collections::HashSet; use std::sync::Arc; -use strum::{EnumString, IntoStaticStr}; +use strum::{EnumIter, EnumString, IntoStaticStr}; pub use types::*; const DATA_COLUMN_DB_KEY_SIZE: usize = 32 + 8; @@ -50,18 +50,18 @@ const DATA_COLUMN_DB_KEY_SIZE: usize = 32 + 8; pub type ColumnIter<'a, K> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a, K> = Box> + 'a>; -pub type RawEntryIter<'a> = Box, Vec), Error>> + 'a>; -pub type RawKeyIter<'a> = Box, Error>> + 'a>; +pub type RawEntryIter<'a> = + Result, Vec), Error>> + 'a>, 
Error>; pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Retrieve some bytes in `column` with `key`. - fn get_bytes(&self, column: &str, key: &[u8]) -> Result>, Error>; + fn get_bytes(&self, column: DBColumn, key: &[u8]) -> Result>, Error>; /// Store some `value` in `column`, indexed with `key`. - fn put_bytes(&self, column: &str, key: &[u8], value: &[u8]) -> Result<(), Error>; + fn put_bytes(&self, column: DBColumn, key: &[u8], value: &[u8]) -> Result<(), Error>; /// Same as put_bytes() but also force a flush to disk - fn put_bytes_sync(&self, column: &str, key: &[u8], value: &[u8]) -> Result<(), Error>; + fn put_bytes_sync(&self, column: DBColumn, key: &[u8], value: &[u8]) -> Result<(), Error>; /// Flush to disk. See /// https://chromium.googlesource.com/external/leveldb/+/HEAD/doc/index.md#synchronous-writes @@ -69,10 +69,10 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { fn sync(&self) -> Result<(), Error>; /// Return `true` if `key` exists in `column`. - fn key_exists(&self, column: &str, key: &[u8]) -> Result; + fn key_exists(&self, column: DBColumn, key: &[u8]) -> Result; /// Removes `key` from `column`. - fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>; + fn key_delete(&self, column: DBColumn, key: &[u8]) -> Result<(), Error>; /// Execute either all of the operations in `batch` or none at all, returning an error. fn do_atomically(&self, batch: Vec) -> Result<(), Error>; @@ -105,17 +105,21 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { self.iter_column_from(column, &vec![0; column.key_size()]) } - /// Iterate through all keys and values in a column from a given starting point. + /// Iterate through all keys and values in a column from a given starting point that fulfill the given predicate. 
fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter; - fn iter_raw_entries(&self, _column: DBColumn, _prefix: &[u8]) -> RawEntryIter { - Box::new(std::iter::empty()) - } - - fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter; + fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter; /// Iterate through all keys in a particular column. - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter; + fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter; + + fn delete_batch(&self, column: DBColumn, ops: HashSet<&[u8]>) -> Result<(), Error>; + + fn delete_if( + &self, + column: DBColumn, + f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error>; } pub trait Key: Sized + 'static { @@ -138,7 +142,7 @@ impl Key for Vec { } } -pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec { +pub fn get_key_for_col(column: DBColumn, key: &[u8]) -> Vec { let mut result = column.as_bytes().to_vec(); result.extend_from_slice(key); result @@ -176,14 +180,18 @@ pub fn parse_data_column_key(data: Vec) -> Result<(Hash256, ColumnIndex), Er #[must_use] #[derive(Clone)] pub enum KeyValueStoreOp { - PutKeyValue(Vec, Vec), - DeleteKey(Vec), + // Indicate that a PUT operation should be made + // to the db store for a (Column, Key, Value) + PutKeyValue(DBColumn, Vec, Vec), + // Indicate that a DELETE operation should be made + // to the db store for a (Column, Key) + DeleteKey(DBColumn, Vec), } pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'static { /// Store an item in `Self`. 
fn put(&self, key: &Hash256, item: &I) -> Result<(), Error> { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); self.put_bytes(column, key, &item.as_store_bytes()) @@ -191,7 +199,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati } fn put_sync(&self, key: &Hash256, item: &I) -> Result<(), Error> { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); self.put_bytes_sync(column, key, &item.as_store_bytes()) @@ -200,7 +208,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Retrieve an item from `Self`. fn get(&self, key: &Hash256) -> Result, Error> { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); match self.get_bytes(column, key)? { @@ -211,7 +219,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Returns `true` if the given key represents an item in `Self`. fn exists(&self, key: &Hash256) -> Result { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); self.key_exists(column, key) @@ -219,7 +227,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati /// Remove an item from `Self`. fn delete(&self, key: &Hash256) -> Result<(), Error> { - let column = I::db_column().into(); + let column = I::db_column(); let key = key.as_slice(); self.key_delete(column, key) @@ -247,7 +255,7 @@ pub enum StoreOp<'a, E: EthSpec> { } /// A unique column identifier. -#[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr, EnumString)] +#[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr, EnumString, EnumIter)] pub enum DBColumn { /// For data related to the database itself. 
#[strum(serialize = "bma")] @@ -351,6 +359,9 @@ pub enum DBColumn { /// For helping persist eagerly computed light client bootstrap data #[strum(serialize = "scm")] SyncCommittee, + /// The dummy table is used to force the db to sync + #[strum(serialize = "dmy")] + Dummy, } /// A block from the database, which might have an execution payload or not. @@ -401,7 +412,8 @@ impl DBColumn { | Self::BeaconStateDiff | Self::SyncCommittee | Self::SyncCommitteeBranch - | Self::LightClientUpdate => 8, + | Self::LightClientUpdate + | Self::Dummy => 8, Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, } } @@ -421,13 +433,18 @@ pub trait StoreItem: Sized { fn from_store_bytes(bytes: &[u8]) -> Result; fn as_kv_store_op(&self, key: Hash256) -> KeyValueStoreOp { - let db_key = get_key_for_col(Self::db_column().into(), key.as_slice()); - KeyValueStoreOp::PutKeyValue(db_key, self.as_store_bytes()) + KeyValueStoreOp::PutKeyValue( + Self::db_column(), + key.as_slice().to_vec(), + self.as_store_bytes(), + ) } } #[cfg(test)] mod tests { + use crate::database::interface::BeaconNodeBackend; + use super::*; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -477,7 +494,7 @@ mod tests { fn simplediskdb() { let dir = tempdir().unwrap(); let path = dir.path(); - let store = LevelDB::open(path).unwrap(); + let store = BeaconNodeBackend::open(&StoreConfig::default(), path).unwrap(); test_impl(store); } @@ -508,7 +525,7 @@ mod tests { #[test] fn test_get_col_from_key() { - let key = get_key_for_col(DBColumn::BeaconBlock.into(), &[1u8; 32]); + let key = get_key_for_col(DBColumn::BeaconBlock, &[1u8; 32]); let col = get_col_from_key(&key).unwrap(); assert_eq!(col, "blk"); } diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 4c7bfdf10f..6070a2d3f0 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,9 +1,9 @@ use crate::{ - get_key_for_col, leveldb_store::BytesKey, ColumnIter, ColumnKeyIter, 
DBColumn, Error, - ItemStore, Key, KeyValueStore, KeyValueStoreOp, RawKeyIter, + errors::Error as DBError, get_key_for_col, hot_cold_store::BytesKey, ColumnIter, ColumnKeyIter, + DBColumn, Error, ItemStore, Key, KeyValueStore, KeyValueStoreOp, }; use parking_lot::{Mutex, MutexGuard, RwLock}; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashSet}; use std::marker::PhantomData; use types::*; @@ -29,19 +29,19 @@ impl MemoryStore { impl KeyValueStore for MemoryStore { /// Get the value of some key from the database. Returns `None` if the key does not exist. - fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { + fn get_bytes(&self, col: DBColumn, key: &[u8]) -> Result>, Error> { let column_key = BytesKey::from_vec(get_key_for_col(col, key)); Ok(self.db.read().get(&column_key).cloned()) } /// Puts a key in the database. - fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { + fn put_bytes(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { let column_key = BytesKey::from_vec(get_key_for_col(col, key)); self.db.write().insert(column_key, val.to_vec()); Ok(()) } - fn put_bytes_sync(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { + fn put_bytes_sync(&self, col: DBColumn, key: &[u8], val: &[u8]) -> Result<(), Error> { self.put_bytes(col, key, val) } @@ -51,13 +51,13 @@ impl KeyValueStore for MemoryStore { } /// Return true if some key exists in some column. - fn key_exists(&self, col: &str, key: &[u8]) -> Result { + fn key_exists(&self, col: DBColumn, key: &[u8]) -> Result { let column_key = BytesKey::from_vec(get_key_for_col(col, key)); Ok(self.db.read().contains_key(&column_key)) } /// Delete some key from the database. 
- fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { + fn key_delete(&self, col: DBColumn, key: &[u8]) -> Result<(), Error> { let column_key = BytesKey::from_vec(get_key_for_col(col, key)); self.db.write().remove(&column_key); Ok(()) @@ -66,12 +66,16 @@ impl KeyValueStore for MemoryStore { fn do_atomically(&self, batch: Vec) -> Result<(), Error> { for op in batch { match op { - KeyValueStoreOp::PutKeyValue(key, value) => { - self.db.write().insert(BytesKey::from_vec(key), value); + KeyValueStoreOp::PutKeyValue(col, key, value) => { + let column_key = get_key_for_col(col, &key); + self.db + .write() + .insert(BytesKey::from_vec(column_key), value); } - KeyValueStoreOp::DeleteKey(key) => { - self.db.write().remove(&BytesKey::from_vec(key)); + KeyValueStoreOp::DeleteKey(col, key) => { + let column_key = get_key_for_col(col, &key); + self.db.write().remove(&BytesKey::from_vec(column_key)); } } } @@ -82,8 +86,7 @@ impl KeyValueStore for MemoryStore { // We use this awkward pattern because we can't lock the `self.db` field *and* maintain a // reference to the lock guard across calls to `.next()`. This would be require a // struct with a field (the iterator) which references another field (the lock guard). 
- let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), from)); - let col = column.as_str(); + let start_key = BytesKey::from_vec(get_key_for_col(column, from)); let keys = self .db .read() @@ -92,7 +95,7 @@ impl KeyValueStore for MemoryStore { .filter_map(|(k, _)| k.remove_column_variable(column).map(|k| k.to_vec())) .collect::>(); Box::new(keys.into_iter().filter_map(move |key| { - self.get_bytes(col, &key).transpose().map(|res| { + self.get_bytes(column, &key).transpose().map(|res| { let k = K::from_bytes(&key)?; let v = res?; Ok((k, v)) @@ -100,18 +103,6 @@ impl KeyValueStore for MemoryStore { })) } - fn iter_raw_keys(&self, column: DBColumn, prefix: &[u8]) -> RawKeyIter { - let start_key = BytesKey::from_vec(get_key_for_col(column.as_str(), prefix)); - let keys = self - .db - .read() - .range(start_key.clone()..) - .take_while(|(k, _)| k.starts_with(&start_key)) - .filter_map(|(k, _)| k.remove_column_variable(column).map(|k| k.to_vec())) - .collect::>(); - Box::new(keys.into_iter().map(Ok)) - } - fn iter_column_keys(&self, column: DBColumn) -> ColumnKeyIter { Box::new(self.iter_column(column).map(|res| res.map(|(k, _)| k))) } @@ -123,6 +114,44 @@ impl KeyValueStore for MemoryStore { fn compact_column(&self, _column: DBColumn) -> Result<(), Error> { Ok(()) } + + fn iter_column_keys_from(&self, column: DBColumn, from: &[u8]) -> ColumnKeyIter { + // We use this awkward pattern because we can't lock the `self.db` field *and* maintain a + // reference to the lock guard across calls to `.next()`. This would be require a + // struct with a field (the iterator) which references another field (the lock guard). + let start_key = BytesKey::from_vec(get_key_for_col(column, from)); + let keys = self + .db + .read() + .range(start_key..) 
+ .take_while(|(k, _)| k.remove_column_variable(column).is_some()) + .filter_map(|(k, _)| k.remove_column_variable(column).map(|k| k.to_vec())) + .collect::>(); + Box::new(keys.into_iter().map(move |key| K::from_bytes(&key))) + } + + fn delete_batch(&self, col: DBColumn, ops: HashSet<&[u8]>) -> Result<(), DBError> { + for op in ops { + let column_key = get_key_for_col(col, op); + self.db.write().remove(&BytesKey::from_vec(column_key)); + } + Ok(()) + } + + fn delete_if( + &self, + column: DBColumn, + mut f: impl FnMut(&[u8]) -> Result, + ) -> Result<(), Error> { + self.db.write().retain(|key, value| { + if key.remove_column_variable(column).is_some() { + !f(value).unwrap_or(false) + } else { + true + } + }); + Ok(()) + } } impl ItemStore for MemoryStore {} diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 3f076a767a..1d70e105b9 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -225,10 +225,10 @@ impl StoreItem for BlobInfo { pub struct DataColumnInfo { /// The slot after which data columns are or *will be* available (>=). /// - /// If this slot is in the future, then it is the first slot of the EIP-7594 fork, from which + /// If this slot is in the future, then it is the first slot of the Fulu fork, from which /// data columns will be available. /// - /// If the `oldest_data_column_slot` is `None` then this means that the EIP-7594 fork epoch is + /// If the `oldest_data_column_slot` is `None` then this means that the Fulu fork epoch is /// not yet known. 
pub oldest_data_column_slot: Option, } diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index f0dd061790..6f9f667917 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -33,6 +33,13 @@ pub static DISK_DB_READ_BYTES: LazyLock> = LazyLock::new(| &["col"], ) }); +pub static DISK_DB_KEY_READ_BYTES: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "store_disk_db_key_read_bytes_total", + "Number of key bytes read from the hot on-disk DB", + &["col"], + ) +}); pub static DISK_DB_READ_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( "store_disk_db_read_count_total", @@ -40,6 +47,13 @@ pub static DISK_DB_READ_COUNT: LazyLock> = LazyLock::new(| &["col"], ) }); +pub static DISK_DB_KEY_READ_COUNT: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "store_disk_db_read_count_total", + "Total number of key reads to the hot on-disk DB", + &["col"], + ) +}); pub static DISK_DB_WRITE_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( "store_disk_db_write_count_total", @@ -66,6 +80,12 @@ pub static DISK_DB_EXISTS_COUNT: LazyLock> = LazyLock::new &["col"], ) }); +pub static DISK_DB_DELETE_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_disk_db_delete_seconds", + "Time taken to delete bytes from the store.", + ) +}); pub static DISK_DB_DELETE_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( "store_disk_db_delete_count_total", @@ -73,6 +93,19 @@ pub static DISK_DB_DELETE_COUNT: LazyLock> = LazyLock::new &["col"], ) }); +pub static DISK_DB_COMPACT_TIMES: LazyLock> = LazyLock::new(|| { + try_create_histogram( + "store_disk_db_compact_seconds", + "Time taken to run compaction on the DB.", + ) +}); +pub static DISK_DB_TYPE: LazyLock> = LazyLock::new(|| { + try_create_int_counter_vec( + "store_disk_db_type", + "The on-disk database type being used", + &["db_type"], + ) +}); /* * Anchor Info */ diff --git 
a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 0b8bc2e0d4..d209512159 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -2,8 +2,8 @@ use crate::chunked_vector::{ load_variable_list_from_db, load_vector_from_db, BlockRootsChunked, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRootsChunked, }; -use crate::{Error, KeyValueStore}; -use ssz::{Decode, DecodeError}; +use crate::{DBColumn, Error, KeyValueStore, KeyValueStoreOp}; +use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::sync::Arc; use types::historical_summary::HistoricalSummary; @@ -172,6 +172,15 @@ impl PartialBeaconState { )) } + /// Prepare the partial state for storage in the KV database. + pub fn as_kv_store_op(&self, state_root: Hash256) -> KeyValueStoreOp { + KeyValueStoreOp::PutKeyValue( + DBColumn::BeaconState, + state_root.as_slice().to_vec(), + self.as_ssz_bytes(), + ) + } + pub fn load_block_roots>( &mut self, store: &S, diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index c0f6b5485e..0dc1000aa0 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -162,8 +162,8 @@ To listen over both IPv4 and IPv6: > > **IPv6**: > -> It listens on the default value of --port6 (`9090`) for both UDP and TCP. -> QUIC will use port `9091` for UDP, which is the default `--port6` value (`9090`) + 1. +> It listens on the default value of --port6 (`9000`) for both UDP and TCP. +> QUIC will use port `9001` for UDP, which is the default `--port6` value (`9000`) + 1. > When using `--listen-address :: --listen-address --port 9909 --discovery-port6 9999`, listening will be set up as follows: > @@ -174,8 +174,8 @@ To listen over both IPv4 and IPv6: > > **IPv6**: > -> It listens on the default value of `--port6` (`9090`) for TCP, and port `9999` for UDP. 
-> QUIC will use port `9091` for UDP, which is the default `--port6` value (`9090`) + 1. +> It listens on the default value of `--port6` (`9000`) for TCP, and port `9999` for UDP. +> QUIC will use port `9001` for UDP, which is the default `--port6` value (`9000`) + 1. ### Configuring Lighthouse to advertise IPv6 reachable addresses diff --git a/book/src/help_bn.md b/book/src/help_bn.md index a4ab44748c..cbcb1ec5a3 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -11,6 +11,9 @@ Options: --auto-compact-db Enable or disable automatic compaction of the database on finalization. [default: true] + --beacon-node-backend + Set the database backend to be used by the beacon node. [possible + values: leveldb] --blob-prune-margin-epochs The margin for blob pruning in epochs. The oldest blobs are pruned up until data_availability_boundary - blob_prune_margin_epochs. [default: @@ -227,7 +230,7 @@ Options: peer without an ENR. --listen-address [
...] The address lighthouse will listen for UDP and TCP connections. To - listen over IpV4 and IpV6 set this flag twice with the different + listen over IPv4 and IPv6 set this flag twice with the different values. Examples: - --listen-address '0.0.0.0' will listen over IPv4. @@ -235,7 +238,8 @@ Options: - --listen-address '0.0.0.0' --listen-address '::' will listen over both IPv4 and IPv6. The order of the given addresses is not relevant. However, multiple IPv4, or multiple IPv6 addresses will not be - accepted. [default: 0.0.0.0] + accepted. If omitted, Lighthouse will listen on all interfaces, for + both IPv4 and IPv6. --log-format Specifies the log format used when emitting logs to the terminal. [possible values: JSON] @@ -298,8 +302,8 @@ Options: [default: 9000] --port6 The TCP/UDP ports to listen on over IPv6 when listening over both IPv4 - and IPv6. Defaults to 9090 when required. The Quic UDP port will be - set to this value + 1. [default: 9090] + and IPv6. Defaults to --port. The Quic UDP port will be set to this + value + 1. --prepare-payload-lookahead The time before the start of a proposal slot at which payload attributes should be sent. Low values are useful for execution nodes @@ -455,6 +459,8 @@ Flags: boot. --disable-inbound-rate-limiter Disables the inbound rate limiter (requests received by this node). + --disable-light-client-server + Disables light client support on the p2p network --disable-log-timestamp If present, do not include timestamps in logging output. --disable-malloc-tuning @@ -508,8 +514,7 @@ Flags: already-subscribed subnets, use with --subscribe-all-subnets to ensure all attestations are received for import. --light-client-server - Act as a full node supporting light clients on the p2p network - [experimental] + DEPRECATED --log-color Force outputting colors when emitting logs to the terminal. 
--logfile-compress diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 71e21d68c9..948a09f44d 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -18,16 +18,16 @@ Options: certificate path. --broadcast Comma-separated list of beacon API topics to broadcast to all beacon - nodes. Possible values are: none, attestations, blocks, subscriptions, - sync-committee. Default (when flag is omitted) is to broadcast - subscriptions only. + nodes. Default (when flag is omitted) is to broadcast subscriptions + only. [possible values: none, attestations, blocks, subscriptions, + sync-committee] --builder-boost-factor Defines the boost factor, a percentage multiplier to apply to the builder's payload value when choosing between a builder payload header and payload from the local execution node. - --builder-registration-timestamp-override + --builder-registration-timestamp-override This flag takes a unix timestamp value that will be used to override - the timestamp used in the builder api registration + the timestamp used in the builder api registration. -d, --datadir Used to specify a custom root data directory for lighthouse keys and databases. Defaults to $HOME/.lighthouse/{network} where network is @@ -41,7 +41,7 @@ Options: The gas limit to be used in all builder proposals for all validators managed by this validator client. Note this will not necessarily be used if the gas limit set here moves too far from the previous block's - gas limit. [default: 30,000,000] + gas limit. [default: 30000000] --genesis-state-url A URL of a beacon-API compatible server from which to download the genesis state. Checkpoint sync server URLs can generally be used with @@ -68,7 +68,8 @@ Options: is supplied, the CORS allowed origin is set to the listen address of this server (e.g., http://localhost:5062). --http-port - Set the listen TCP port for the RESTful HTTP API server. + Set the listen TCP port for the RESTful HTTP API server. 
[default: + 5062] --http-token-path Path to file containing the HTTP API token for validator client authentication. If not specified, defaults to @@ -96,6 +97,7 @@ Options: set to 0, background file logging is disabled. [default: 200] --metrics-address
Set the listen address for the Prometheus metrics HTTP server. + [default: 127.0.0.1] --metrics-allow-origin Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not recommended in production). If no value @@ -103,6 +105,7 @@ Options: this server (e.g., http://localhost:5064). --metrics-port Set the listen TCP port for the Prometheus metrics HTTP server. + [default: 5064] --monitoring-endpoint
Enables the monitoring service for sending system metrics to a remote endpoint. This can be used to monitor your setup on certain services @@ -113,7 +116,7 @@ Options: provide an untrusted URL. --monitoring-endpoint-period Defines how many seconds to wait between each message sent to the - monitoring-endpoint. Default: 60s + monitoring-endpoint. [default: 60] --network Name of the Eth2 chain Lighthouse will sync and follow. [possible values: mainnet, gnosis, chiado, sepolia, holesky] @@ -145,8 +148,8 @@ Options: each validator along with the common slashing protection database and the validator_definitions.yml --web3-signer-keep-alive-timeout - Keep-alive timeout for each web3signer connection. Set to 'null' to - never timeout [default: 20000] + Keep-alive timeout for each web3signer connection. Set to '0' to never + timeout. [default: 20000] --web3-signer-max-idle-connections Maximum number of idle connections to maintain per web3signer host. Default is unlimited. diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 3c9f27d236..19098a5bc8 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -154,7 +154,7 @@ You can customise the features that Lighthouse is built with using the `FEATURES variable. E.g. ``` -FEATURES=gnosis,slasher-lmdb make +FEATURES=gnosis,slasher-lmdb,beacon-node-leveldb make ``` Commonly used features include: @@ -163,11 +163,12 @@ Commonly used features include: - `portable`: the default feature as Lighthouse now uses runtime detection of hardware CPU features. - `slasher-lmdb`: support for the LMDB slasher backend. Enabled by default. - `slasher-mdbx`: support for the MDBX slasher backend. +- `beacon-node-leveldb`: support for the leveldb backend. Enabled by default. - `jemalloc`: use [`jemalloc`][jemalloc] to allocate memory. Enabled by default on Linux and macOS. Not supported on Windows. - `spec-minimal`: support for the minimal preset (useful for testing). 
-Default features (e.g. `slasher-lmdb`) may be opted out of using the `--no-default-features` +Default features (e.g. `slasher-lmdb`, `beacon-node-leveldb`) may be opted out of using the `--no-default-features` argument for `cargo`, which can be plumbed in via the `CARGO_INSTALL_EXTRA_FLAGS` environment variable. E.g. diff --git a/boot_node/src/cli.rs b/boot_node/src/cli.rs index 440a9d27e2..0f274885d1 100644 --- a/boot_node/src/cli.rs +++ b/boot_node/src/cli.rs @@ -13,15 +13,6 @@ pub fn cli_app() -> Command { surface compared to a full beacon node.") .styles(get_color_style()) .display_order(0) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - ) .arg( Arg::new("enr-address") .long("enr-address") diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 480d27a439..7644d1aef4 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -7,6 +7,7 @@ edition = { workspace = true } [dependencies] derivative = { workspace = true } +either = { workspace = true } enr = { version = "0.13.0", features = ["ed25519"] } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index f19fdf7a9c..7eb96d9770 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -18,6 +18,7 @@ use self::mixin::{RequestAccept, ResponseOptional}; use self::types::{Error as ResponseError, *}; use ::types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse; use derivative::Derivative; +use either::Either; use futures::Stream; use futures_util::StreamExt; use libp2p_identity::PeerId; @@ -48,6 +49,7 @@ pub const CONSENSUS_BLOCK_VALUE_HEADER: &str = "Eth-Consensus-Block-Value"; pub const CONTENT_TYPE_HEADER: &str = "Content-Type"; pub const SSZ_CONTENT_TYPE_HEADER: &str = "application/octet-stream"; +pub const JSON_CONTENT_TYPE_HEADER: &str = 
"application/json"; #[derive(Debug)] pub enum Error { @@ -111,9 +113,9 @@ impl Error { Error::InvalidSignatureHeader => None, Error::MissingSignatureHeader => None, Error::InvalidJson(_) => None, + Error::InvalidSsz(_) => None, Error::InvalidServerSentEvent(_) => None, Error::InvalidHeaders(_) => None, - Error::InvalidSsz(_) => None, Error::TokenReadError(..) => None, Error::NoServerPubkey | Error::NoToken => None, } @@ -1324,9 +1326,9 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/pool/attestations` - pub async fn post_beacon_pool_attestations_v2( + pub async fn post_beacon_pool_attestations_v2( &self, - attestations: &[SingleAttestation], + attestations: Either>, Vec>, fork_name: ForkName, ) -> Result<(), Error> { let mut path = self.eth_path(V2)?; @@ -1337,13 +1339,26 @@ impl BeaconNodeHttpClient { .push("pool") .push("attestations"); - self.post_with_timeout_and_consensus_header( - path, - &attestations, - self.timeouts.attestation, - fork_name, - ) - .await?; + match attestations { + Either::Right(attestations) => { + self.post_with_timeout_and_consensus_header( + path, + &attestations, + self.timeouts.attestation, + fork_name, + ) + .await?; + } + Either::Left(attestations) => { + self.post_with_timeout_and_consensus_header( + path, + &attestations, + self.timeouts.attestation, + fork_name, + ) + .await?; + } + }; Ok(()) } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 97c53d2c22..36086454f2 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -585,12 +585,20 @@ pub struct IdentityData { pub metadata: MetaData, } +#[superstruct( + variants(V2, V3), + variant_attributes(derive(Clone, Debug, PartialEq, Serialize, Deserialize)) +)] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] pub struct MetaData { #[serde(with = "serde_utils::quoted_u64")] pub seq_number: u64, pub attnets: String, pub syncnets: String, + #[superstruct(only(V3))] + #[serde(with = "serde_utils::quoted_u64")] + pub 
custody_group_count: u64, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -1635,7 +1643,7 @@ impl FullBlockContents { } /// SSZ decode with fork variant determined by slot. - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { let slot_len = ::ssz_fixed_len(); let slot_bytes = bytes .get(0..slot_len) @@ -1649,10 +1657,7 @@ impl FullBlockContents { } /// SSZ decode with fork variant passed in explicitly. - pub fn from_ssz_bytes_for_fork( - bytes: &[u8], - fork_name: ForkName, - ) -> Result { + pub fn from_ssz_bytes_for_fork(bytes: &[u8], fork_name: ForkName) -> Result { if fork_name.deneb_enabled() { let mut builder = ssz::SszDecoderBuilder::new(bytes); @@ -1807,7 +1812,7 @@ impl PublishBlockRequest { } /// SSZ decode with fork variant determined by `fork_name`. - pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { if fork_name.deneb_enabled() { let mut builder = ssz::SszDecoderBuilder::new(bytes); builder.register_anonymous_variable_length_item()?; @@ -1816,7 +1821,7 @@ impl PublishBlockRequest { let mut decoder = builder.build()?; let block = decoder.decode_next_with(|bytes| { - SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + SignedBeaconBlock::from_ssz_bytes_by_fork(bytes, fork_name) })?; let kzg_proofs = decoder.decode_next()?; let blobs = decoder.decode_next()?; @@ -1825,7 +1830,7 @@ impl PublishBlockRequest { Some((kzg_proofs, blobs)), )) } else { - SignedBeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + SignedBeaconBlock::from_ssz_bytes_by_fork(bytes, fork_name) .map(|block| PublishBlockRequest::Block(Arc::new(block))) } } @@ -1917,6 +1922,24 @@ pub enum FullPayloadContents { PayloadAndBlobs(ExecutionPayloadAndBlobs), } +impl ForkVersionDecode for FullPayloadContents { + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + if 
fork_name.deneb_enabled() { + Ok(Self::PayloadAndBlobs( + ExecutionPayloadAndBlobs::from_ssz_bytes_by_fork(bytes, fork_name)?, + )) + } else if fork_name.bellatrix_enabled() { + Ok(Self::Payload(ExecutionPayload::from_ssz_bytes_by_fork( + bytes, fork_name, + )?)) + } else { + Err(ssz::DecodeError::BytesInvalid(format!( + "FullPayloadContents decoding for {fork_name} not implemented" + ))) + } + } +} + impl FullPayloadContents { pub fn new( execution_payload: ExecutionPayload, @@ -1983,6 +2006,36 @@ pub struct ExecutionPayloadAndBlobs { pub blobs_bundle: BlobsBundle, } +impl ForkVersionDecode for ExecutionPayloadAndBlobs { + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; + builder.register_type::>()?; + let mut decoder = builder.build()?; + + if fork_name.deneb_enabled() { + let execution_payload = decoder.decode_next_with(|bytes| { + ExecutionPayload::from_ssz_bytes_by_fork(bytes, fork_name) + })?; + let blobs_bundle = decoder.decode_next()?; + Ok(Self { + execution_payload, + blobs_bundle, + }) + } else { + Err(DecodeError::BytesInvalid(format!( + "ExecutionPayloadAndBlobs decoding for {fork_name} not implemented" + ))) + } + } +} + +#[derive(Debug)] +pub enum ContentType { + Json, + Ssz, +} + #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] #[serde(bound = "E: EthSpec")] pub struct BlobsBundle { diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index d0b61422e0..e5f38b8c9b 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -35,7 +35,7 @@ DENEB_FORK_VERSION: 0x05017000 DENEB_FORK_EPOCH: 29696 # Electra ELECTRA_FORK_VERSION: 0x06017000 -ELECTRA_FORK_EPOCH: 18446744073709551615 
+ELECTRA_FORK_EPOCH: 115968 # Fulu FULU_FORK_VERSION: 0x07017000 FULU_FORK_EPOCH: 18446744073709551615 @@ -127,6 +127,18 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + # DAS NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index f92de4225d..74fe727867 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -53,8 +53,6 @@ ELECTRA_FORK_EPOCH: 18446744073709551615 # Fulu FULU_FORK_VERSION: 0x06000000 FULU_FORK_EPOCH: 18446744073709551615 -# PeerDAS -EIP7594_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 7564d8f0f6..af78332205 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -36,6 +36,10 @@ CAPELLA_FORK_EPOCH: 56832 DENEB_FORK_VERSION: 0x90000073 DENEB_FORK_EPOCH: 132608 +# Electra +ELECTRA_FORK_VERSION: 0x90000074 +ELECTRA_FORK_EPOCH: 222464 + # Time parameters # --------------------------------------------------------------- # 12 seconds @@ -73,6 +77,8 @@ PROPOSER_SCORE_BOOST: 40 REORG_HEAD_WEIGHT_THRESHOLD: 20 # 160% REORG_PARENT_WEIGHT_THRESHOLD: 160 +# 
`2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 # Deposit contract # --------------------------------------------------------------- @@ -122,6 +128,18 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + # DAS NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 diff --git a/common/malloc_utils/src/jemalloc.rs b/common/malloc_utils/src/jemalloc.rs index 0e2e00cb0e..f3a35fc41c 100644 --- a/common/malloc_utils/src/jemalloc.rs +++ b/common/malloc_utils/src/jemalloc.rs @@ -9,7 +9,7 @@ //! B) `_RJEM_MALLOC_CONF` at runtime. use metrics::{set_gauge, try_create_int_gauge, IntGauge}; use std::sync::LazyLock; -use tikv_jemalloc_ctl::{arenas, epoch, stats, Error}; +use tikv_jemalloc_ctl::{arenas, epoch, stats, Access, AsName, Error}; #[global_allocator] static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; @@ -52,3 +52,18 @@ pub fn scrape_jemalloc_metrics_fallible() -> Result<(), Error> { Ok(()) } + +pub fn page_size() -> Result { + // Full list of keys: https://jemalloc.net/jemalloc.3.html + "arenas.page\0".name().read() +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn page_size_ok() { + assert!(page_size().is_ok()); + } +} diff --git a/common/malloc_utils/src/lib.rs b/common/malloc_utils/src/lib.rs index 3bb242369f..50d2785a74 100644 --- a/common/malloc_utils/src/lib.rs +++ b/common/malloc_utils/src/lib.rs @@ -29,10 +29,10 @@ not(target_env = "musl"), not(feature = "jemalloc") ))] -mod glibc; +pub mod glibc; #[cfg(feature = "jemalloc")] -mod jemalloc; +pub mod jemalloc; pub use interface::*; diff --git a/consensus/fork_choice/tests/tests.rs 
b/consensus/fork_choice/tests/tests.rs index 70b4b73d52..b224cde048 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -54,7 +54,7 @@ impl ForkChoiceTest { /// Creates a new tester with a custom chain config. pub fn new_with_chain_config(chain_config: ChainConfig) -> Self { // Run fork choice tests against the latest fork. - let spec = ForkName::latest().make_genesis_spec(ChainSpec::default()); + let spec = ForkName::latest_stable().make_genesis_spec(ChainSpec::default()); let harness = BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.into()) .chain_config(chain_config) diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 502ad25838..ef4799c245 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -523,9 +523,9 @@ pub fn get_expected_withdrawals( // [New in Electra:EIP7251] // Consume pending partial withdrawals let processed_partial_withdrawals_count = - if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { + if let Ok(pending_partial_withdrawals) = state.pending_partial_withdrawals() { let mut processed_partial_withdrawals_count = 0; - for withdrawal in partial_withdrawals { + for withdrawal in pending_partial_withdrawals { if withdrawal.withdrawable_epoch > epoch || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize { @@ -552,7 +552,7 @@ pub fn get_expected_withdrawals( validator_index: withdrawal.validator_index, address: validator .get_execution_withdrawal_address(spec) - .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?, + .ok_or(BeaconStateError::NonExecutionAddressWithdrawalCredential)?, amount: withdrawable_balance, }); withdrawal_index.safe_add_assign(1)?; @@ -583,7 +583,7 @@ pub fn get_expected_withdrawals( validator_index as usize, ))? 
.safe_sub(partially_withdrawn_balance)?; - if validator.is_fully_withdrawable_at(balance, epoch, spec, fork_name) { + if validator.is_fully_withdrawable_validator(balance, epoch, spec, fork_name) { withdrawals.push(Withdrawal { index: withdrawal_index, validator_index, @@ -600,9 +600,7 @@ pub fn get_expected_withdrawals( address: validator .get_execution_withdrawal_address(spec) .ok_or(BlockProcessingError::WithdrawalCredentialsInvalid)?, - amount: balance.safe_sub( - validator.get_max_effective_balance(spec, state.fork_name_unchecked()), - )?, + amount: balance.safe_sub(validator.get_max_effective_balance(spec, fork_name))?, }); withdrawal_index.safe_add_assign(1)?; } @@ -624,7 +622,7 @@ pub fn process_withdrawals>( spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { if state.fork_name_unchecked().capella_enabled() { - let (expected_withdrawals, partial_withdrawals_count) = + let (expected_withdrawals, processed_partial_withdrawals_count) = get_expected_withdrawals(state, spec)?; let expected_root = expected_withdrawals.tree_hash_root(); let withdrawals_root = payload.withdrawals_root()?; @@ -645,14 +643,10 @@ pub fn process_withdrawals>( } // Update pending partial withdrawals [New in Electra:EIP7251] - if let Some(partial_withdrawals_count) = partial_withdrawals_count { - // TODO(electra): Use efficient pop_front after milhouse release https://github.com/sigp/milhouse/pull/38 - let new_partial_withdrawals = state - .pending_partial_withdrawals()? - .iter_from(partial_withdrawals_count)? - .cloned() - .collect::>(); - *state.pending_partial_withdrawals_mut()? = List::new(new_partial_withdrawals)?; + if let Some(processed_partial_withdrawals_count) = processed_partial_withdrawals_count { + state + .pending_partial_withdrawals_mut()? 
+ .pop_front(processed_partial_withdrawals_count)?; } // Update the next withdrawal index if this block contained withdrawals diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index a4a81c8eef..5c31669a60 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -1075,13 +1075,9 @@ fn process_pending_consolidations( next_pending_consolidation.safe_add_assign(1)?; } - let new_pending_consolidations = List::try_from_iter( - state - .pending_consolidations()? - .iter_from(next_pending_consolidation)? - .cloned(), - )?; - *state.pending_consolidations_mut()? = new_pending_consolidations; + state + .pending_consolidations_mut()? + .pop_front(next_pending_consolidation)?; // the spec tests require we don't perform effective balance updates when testing pending_consolidations if !perform_effective_balance_updates { diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 0f32e1553d..258b28a45b 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -47,10 +47,11 @@ pub fn upgrade_to_electra( .enumerate() .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) .sorted_by_key(|(index, validator)| (validator.activation_eligibility_epoch, *index)) + .map(|(index, _)| index) .collect::>(); // Process validators to queue entire balance and reset them - for (index, _) in pre_activation { + for index in pre_activation { let balance = post .balances_mut() .get_mut(index) diff --git a/consensus/types/presets/gnosis/deneb.yaml b/consensus/types/presets/gnosis/deneb.yaml index 9a46a6dafe..d25c4d3d38 100644 --- a/consensus/types/presets/gnosis/deneb.yaml +++ b/consensus/types/presets/gnosis/deneb.yaml @@ -1,6 +1,4 @@ # Gnosis preset - Deneb 
-# NOTE: The below are PLACEHOLDER values from Mainnet. -# Gnosis preset for the Deneb fork TBD: https://github.com/gnosischain/configs/tree/main/presets/gnosis # Misc # --------------------------------------------------------------- diff --git a/consensus/types/presets/gnosis/eip7594.yaml b/consensus/types/presets/gnosis/eip7594.yaml deleted file mode 100644 index 813febf26d..0000000000 --- a/consensus/types/presets/gnosis/eip7594.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Mainnet preset - EIP7594 - -# Misc -# --------------------------------------------------------------- -# `uint64(2**6)` (= 64) -FIELD_ELEMENTS_PER_CELL: 64 -# `uint64(2 * 4096)` (= 8192) -FIELD_ELEMENTS_PER_EXT_BLOB: 8192 -# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) -KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/presets/gnosis/fulu.yaml b/consensus/types/presets/gnosis/fulu.yaml index 35a7c98fbf..e5f3ce0212 100644 --- a/consensus/types/presets/gnosis/fulu.yaml +++ b/consensus/types/presets/gnosis/fulu.yaml @@ -1,3 +1,10 @@ # Gnosis preset - Fulu -FULU_PLACEHOLDER: 0 +# Misc +# --------------------------------------------------------------- +# `uint64(2**6)` (= 64) +FIELD_ELEMENTS_PER_CELL: 64 +# `uint64(2 * 4096)` (= 8192) +FIELD_ELEMENTS_PER_EXT_BLOB: 8192 +# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) +KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/presets/mainnet/eip7594.yaml b/consensus/types/presets/mainnet/eip7594.yaml deleted file mode 100644 index 813febf26d..0000000000 --- a/consensus/types/presets/mainnet/eip7594.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Mainnet preset - EIP7594 - -# Misc -# --------------------------------------------------------------- -# `uint64(2**6)` (= 64) -FIELD_ELEMENTS_PER_CELL: 64 -# `uint64(2 * 4096)` (= 8192) -FIELD_ELEMENTS_PER_EXT_BLOB: 8192 -# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) 
-KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/presets/mainnet/fulu.yaml b/consensus/types/presets/mainnet/fulu.yaml index 8aa9ccdcc3..394f335f90 100644 --- a/consensus/types/presets/mainnet/fulu.yaml +++ b/consensus/types/presets/mainnet/fulu.yaml @@ -1,3 +1,10 @@ # Mainnet preset - Fulu -FULU_PLACEHOLDER: 0 +# Misc +# --------------------------------------------------------------- +# `uint64(2**6)` (= 64) +FIELD_ELEMENTS_PER_CELL: 64 +# `uint64(2 * 4096)` (= 8192) +FIELD_ELEMENTS_PER_EXT_BLOB: 8192 +# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) +KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/presets/minimal/eip7594.yaml b/consensus/types/presets/minimal/eip7594.yaml deleted file mode 100644 index 847719a421..0000000000 --- a/consensus/types/presets/minimal/eip7594.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# Minimal preset - EIP7594 - -# Misc -# --------------------------------------------------------------- -# `uint64(2**6)` (= 64) -FIELD_ELEMENTS_PER_CELL: 64 -# `uint64(2 * 4096)` (= 8192) -FIELD_ELEMENTS_PER_EXT_BLOB: 8192 -# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) -KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/presets/minimal/fulu.yaml b/consensus/types/presets/minimal/fulu.yaml index 121c9858f4..c961eb7f3c 100644 --- a/consensus/types/presets/minimal/fulu.yaml +++ b/consensus/types/presets/minimal/fulu.yaml @@ -1,3 +1,10 @@ # Minimal preset - Fulu -FULU_PLACEHOLDER: 0 +# Misc +# --------------------------------------------------------------- +# `uint64(2**6)` (= 64) +FIELD_ELEMENTS_PER_CELL: 64 +# `uint64(2 * 4096)` (= 8192) +FIELD_ELEMENTS_PER_EXT_BLOB: 8192 +# uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) +KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: 4 diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index e22bba4cef..a0b083cdf2 100644 --- 
a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -2,7 +2,6 @@ use crate::slot_data::SlotData; use crate::{test_utils::TestRandom, Hash256, Slot}; use crate::{Checkpoint, ForkVersionDeserialize}; use derivative::Derivative; -use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::BitVector; @@ -12,22 +11,17 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; use super::{ - AggregateSignature, AttestationData, BitList, ChainSpec, CommitteeIndex, Domain, EthSpec, Fork, - SecretKey, Signature, SignedRoot, + AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, + Signature, SignedRoot, }; #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), AlreadySigned(usize), - SubnetCountIsZero(ArithError), IncorrectStateVariant, InvalidCommitteeLength, InvalidCommitteeIndex, - AttesterNotInCommittee(usize), - InvalidCommittee, - MissingCommittee, - NoCommitteeForSlotAndIndex { slot: Slot, index: CommitteeIndex }, } impl From for Error { @@ -238,7 +232,7 @@ impl Attestation { pub fn to_single_attestation_with_attester_index( &self, - attester_index: usize, + attester_index: u64, ) -> Result { match self { Self::Base(_) => Err(Error::IncorrectStateVariant), @@ -375,14 +369,14 @@ impl AttestationElectra { pub fn to_single_attestation_with_attester_index( &self, - attester_index: usize, + attester_index: u64, ) -> Result { let Some(committee_index) = self.committee_index() else { return Err(Error::InvalidCommitteeIndex); }; Ok(SingleAttestation { - committee_index: committee_index as usize, + committee_index, attester_index, data: self.data.clone(), signature: self.signature.clone(), @@ -579,44 +573,14 @@ impl ForkVersionDeserialize for Vec> { PartialEq, )] pub struct SingleAttestation { - pub committee_index: usize, - pub attester_index: usize, + #[serde(with = "serde_utils::quoted_u64")] + pub 
committee_index: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub attester_index: u64, pub data: AttestationData, pub signature: AggregateSignature, } -impl SingleAttestation { - pub fn to_attestation(&self, committee: &[usize]) -> Result, Error> { - let aggregation_bit = committee - .iter() - .enumerate() - .find_map(|(i, &validator_index)| { - if self.attester_index == validator_index { - return Some(i); - } - None - }) - .ok_or(Error::AttesterNotInCommittee(self.attester_index))?; - - let mut committee_bits: BitVector = BitVector::default(); - committee_bits - .set(self.committee_index, true) - .map_err(|_| Error::InvalidCommitteeIndex)?; - - let mut aggregation_bits = - BitList::with_capacity(committee.len()).map_err(|_| Error::InvalidCommitteeLength)?; - - aggregation_bits.set(aggregation_bit, true)?; - - Ok(Attestation::Electra(AttestationElectra { - aggregation_bits, - committee_bits, - data: self.data.clone(), - signature: self.signature.clone(), - })) - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 6f44998cdf..157271b227 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -161,7 +161,7 @@ pub enum Error { InvalidFlagIndex(usize), MerkleTreeError(merkle_proof::MerkleTreeError), PartialWithdrawalCountInvalid(usize), - NonExecutionAddresWithdrawalCredential, + NonExecutionAddressWithdrawalCredential, NoCommitteeFound(CommitteeIndex), InvalidCommitteeIndex(CommitteeIndex), InvalidSelectionProof { @@ -2214,7 +2214,7 @@ impl BeaconState { // ******* Electra accessors ******* - /// Return the churn limit for the current epoch. + /// Return the churn limit for the current epoch. 
pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { let total_active_balance = self.get_total_active_balance()?; let churn = std::cmp::max( @@ -2329,21 +2329,12 @@ impl BeaconState { | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), - BeaconState::Electra(_) => { - let state = self.as_electra_mut()?; - + BeaconState::Electra(_) | BeaconState::Fulu(_) => { // Consume the balance and update state variables - state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?; - state.earliest_exit_epoch = earliest_exit_epoch; - Ok(state.earliest_exit_epoch) - } - BeaconState::Fulu(_) => { - let state = self.as_fulu_mut()?; - - // Consume the balance and update state variables - state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?; - state.earliest_exit_epoch = earliest_exit_epoch; - Ok(state.earliest_exit_epoch) + *self.exit_balance_to_consume_mut()? = + exit_balance_to_consume.safe_sub(exit_balance)?; + *self.earliest_exit_epoch_mut()? = earliest_exit_epoch; + self.earliest_exit_epoch() } } } @@ -2385,23 +2376,12 @@ impl BeaconState { | BeaconState::Bellatrix(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), - BeaconState::Electra(_) => { - let state = self.as_electra_mut()?; - + BeaconState::Electra(_) | BeaconState::Fulu(_) => { // Consume the balance and update state variables. - state.consolidation_balance_to_consume = + *self.consolidation_balance_to_consume_mut()? = consolidation_balance_to_consume.safe_sub(consolidation_balance)?; - state.earliest_consolidation_epoch = earliest_consolidation_epoch; - Ok(state.earliest_consolidation_epoch) - } - BeaconState::Fulu(_) => { - let state = self.as_fulu_mut()?; - - // Consume the balance and update state variables. 
- state.consolidation_balance_to_consume = - consolidation_balance_to_consume.safe_sub(consolidation_balance)?; - state.earliest_consolidation_epoch = earliest_consolidation_epoch; - Ok(state.earliest_consolidation_epoch) + *self.earliest_consolidation_epoch_mut()? = earliest_consolidation_epoch; + self.earliest_consolidation_epoch() } } } diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 2ce46ca704..49911c3909 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -2,26 +2,38 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ ChainSpec, EthSpec, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, ForkVersionDeserialize, - SignedRoot, Uint256, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, + ForkVersionDecode, ForkVersionDeserialize, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize, Deserializer, Serialize}; +use ssz::Decode; +use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use tree_hash_derive::TreeHash; #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( - derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone), + derive( + PartialEq, + Debug, + Encode, + Serialize, + Deserialize, + TreeHash, + Decode, + Clone + ), serde(bound = "E: EthSpec", deny_unknown_fields) ), map_ref_into(ExecutionPayloadHeaderRef), map_ref_mut_into(ExecutionPayloadHeaderRefMut) )] -#[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] +#[derive(PartialEq, Debug, Encode, Serialize, Deserialize, TreeHash, Clone)] #[serde(bound = "E: EthSpec", deny_unknown_fields, untagged)] +#[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] pub struct BuilderBid 
{ #[superstruct(only(Bellatrix), partial_getter(rename = "header_bellatrix"))] @@ -36,6 +48,8 @@ pub struct BuilderBid { pub header: ExecutionPayloadHeaderFulu, #[superstruct(only(Deneb, Electra, Fulu))] pub blob_kzg_commitments: KzgCommitments, + #[superstruct(only(Electra, Fulu))] + pub execution_requests: ExecutionRequests, #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, @@ -63,16 +77,54 @@ impl<'a, E: EthSpec> BuilderBidRefMut<'a, E> { } } +impl ForkVersionDecode for BuilderBid { + /// SSZ decode with explicit fork variant. + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + let builder_bid = match fork_name { + ForkName::Altair | ForkName::Base => { + return Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayloadHeader: {fork_name}", + ))) + } + ForkName::Bellatrix => { + BuilderBid::Bellatrix(BuilderBidBellatrix::from_ssz_bytes(bytes)?) + } + ForkName::Capella => BuilderBid::Capella(BuilderBidCapella::from_ssz_bytes(bytes)?), + ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb::from_ssz_bytes(bytes)?), + ForkName::Electra => BuilderBid::Electra(BuilderBidElectra::from_ssz_bytes(bytes)?), + ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu::from_ssz_bytes(bytes)?), + }; + Ok(builder_bid) + } +} + impl SignedRoot for BuilderBid {} /// Validator registration, for use in interacting with servers implementing the builder API. -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[derive(PartialEq, Debug, Encode, Serialize, Deserialize, Clone)] #[serde(bound = "E: EthSpec")] pub struct SignedBuilderBid { pub message: BuilderBid, pub signature: Signature, } +impl ForkVersionDecode for SignedBuilderBid { + /// SSZ decode with explicit fork variant. 
+ fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + let mut builder = ssz::SszDecoderBuilder::new(bytes); + + builder.register_anonymous_variable_length_item()?; + builder.register_type::()?; + + let mut decoder = builder.build()?; + let message = decoder + .decode_next_with(|bytes| BuilderBid::from_ssz_bytes_by_fork(bytes, fork_name))?; + let signature = decoder.decode_next()?; + + Ok(Self { message, signature }) + } +} + impl ForkVersionDeserialize for BuilderBid { fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: serde_json::value::Value, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 9177f66b94..230805e86c 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -198,12 +198,6 @@ pub struct ChainSpec { pub fulu_fork_version: [u8; 4], /// The Fulu fork epoch is optional, with `None` representing "Fulu never happens". pub fulu_fork_epoch: Option, - pub fulu_placeholder: u64, - - /* - * DAS params - */ - pub eip7594_fork_epoch: Option, pub number_of_columns: u64, pub number_of_custody_groups: u64, pub data_column_sidecar_subnet_count: u64, @@ -217,7 +211,7 @@ pub struct ChainSpec { pub network_id: u8, pub target_aggregators_per_committee: u64, pub gossip_max_size: u64, - pub max_request_blocks: u64, + max_request_blocks: u64, pub min_epochs_for_block_requests: u64, pub max_chunk_size: u64, pub ttfb_timeout: u64, @@ -233,19 +227,19 @@ pub struct ChainSpec { /* * Networking Deneb */ - pub max_request_blocks_deneb: u64, - pub max_request_blob_sidecars: u64, + max_request_blocks_deneb: u64, + max_request_blob_sidecars: u64, pub max_request_data_column_sidecars: u64, pub min_epochs_for_blob_sidecars_requests: u64, - pub blob_sidecar_subnet_count: u64, - pub max_blobs_per_block: u64, + blob_sidecar_subnet_count: u64, + max_blobs_per_block: u64, /* * Networking Electra */ max_blobs_per_block_electra: u64, - pub blob_sidecar_subnet_count_electra: u64, - pub 
max_request_blob_sidecars_electra: u64, + blob_sidecar_subnet_count_electra: u64, + max_request_blob_sidecars_electra: u64, /* * Networking Derived @@ -440,16 +434,16 @@ impl ChainSpec { } } - /// Returns true if the given epoch is greater than or equal to the `EIP7594_FORK_EPOCH`. + /// Returns true if the given epoch is greater than or equal to the `FULU_FORK_EPOCH`. pub fn is_peer_das_enabled_for_epoch(&self, block_epoch: Epoch) -> bool { - self.eip7594_fork_epoch - .is_some_and(|eip7594_fork_epoch| block_epoch >= eip7594_fork_epoch) + self.fulu_fork_epoch + .is_some_and(|fulu_fork_epoch| block_epoch >= fulu_fork_epoch) } - /// Returns true if `EIP7594_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. + /// Returns true if `FULU_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. pub fn is_peer_das_scheduled(&self) -> bool { - self.eip7594_fork_epoch - .is_some_and(|eip7594_fork_epoch| eip7594_fork_epoch != self.far_future_epoch) + self.fulu_fork_epoch + .is_some_and(|fulu_fork_epoch| fulu_fork_epoch != self.far_future_epoch) } /// Returns a full `Fork` struct for a given epoch. @@ -625,6 +619,17 @@ impl ChainSpec { } } + /// Returns the highest possible value for max_request_blocks based on enabled forks. + /// + /// This is useful for upper bounds in testing. + pub fn max_request_blocks_upper_bound(&self) -> usize { + if self.deneb_fork_epoch.is_some() { + self.max_request_blocks_deneb as usize + } else { + self.max_request_blocks as usize + } + } + pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize { if fork_name.electra_enabled() { self.max_request_blob_sidecars_electra as usize @@ -633,6 +638,17 @@ impl ChainSpec { } } + /// Returns the highest possible value for max_request_blobs based on enabled forks. + /// + /// This is useful for upper bounds in testing. 
+ pub fn max_request_blobs_upper_bound(&self) -> usize { + if self.electra_fork_epoch.is_some() { + self.max_request_blob_sidecars_electra as usize + } else { + self.max_request_blob_sidecars as usize + } + } + /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`. pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 { self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch)) @@ -647,6 +663,26 @@ impl ChainSpec { } } + /// Returns the `BLOB_SIDECAR_SUBNET_COUNT` at the given fork_name. + pub fn blob_sidecar_subnet_count(&self, fork_name: ForkName) -> u64 { + if fork_name.electra_enabled() { + self.blob_sidecar_subnet_count_electra + } else { + self.blob_sidecar_subnet_count + } + } + + /// Returns the highest possible value of blob sidecar subnet count based on enabled forks. + /// + /// This is useful for upper bounds for the subnet count during a given run of lighthouse. + pub fn blob_sidecar_subnet_count_max(&self) -> u64 { + if self.electra_fork_epoch.is_some() { + self.blob_sidecar_subnet_count_electra + } else { + self.blob_sidecar_subnet_count + } + } + /// Returns the number of data columns per custody group. 
pub fn data_columns_per_group(&self) -> u64 { self.number_of_columns @@ -874,17 +910,11 @@ impl ChainSpec { */ fulu_fork_version: [0x06, 0x00, 0x00, 0x00], fulu_fork_epoch: None, - fulu_placeholder: 0, - - /* - * DAS params - */ - eip7594_fork_epoch: None, - number_of_columns: 128, + custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, + number_of_columns: 128, samples_per_slot: 8, - custody_requirement: 4, /* * Network specific @@ -1003,8 +1033,6 @@ impl ChainSpec { // Fulu fulu_fork_version: [0x06, 0x00, 0x00, 0x01], fulu_fork_epoch: None, - // PeerDAS - eip7594_fork_epoch: None, // Other network_id: 2, // lighthouse testnet network id deposit_chain_id: 5, @@ -1212,17 +1240,11 @@ impl ChainSpec { */ fulu_fork_version: [0x06, 0x00, 0x00, 0x64], fulu_fork_epoch: None, - fulu_placeholder: 0, - - /* - * DAS params - */ - eip7594_fork_epoch: None, - number_of_columns: 128, + custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, + number_of_columns: 128, samples_per_slot: 8, - custody_requirement: 4, /* * Network specific @@ -1366,11 +1388,6 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub fulu_fork_epoch: Option>, - #[serde(default)] - #[serde(serialize_with = "serialize_fork_epoch")] - #[serde(deserialize_with = "deserialize_fork_epoch")] - pub eip7594_fork_epoch: Option>, - #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -1813,10 +1830,6 @@ impl Config { .fulu_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), - eip7594_fork_epoch: spec - .eip7594_fork_epoch - .map(|epoch| MaybeQuoted { value: epoch }), - seconds_per_slot: spec.seconds_per_slot, seconds_per_eth1_block: spec.seconds_per_eth1_block, min_validator_withdrawability_delay: spec.min_validator_withdrawability_delay, @@ -1903,7 +1916,6 @@ impl Config { electra_fork_version, fulu_fork_epoch, fulu_fork_version, - eip7594_fork_epoch, 
seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, @@ -1973,7 +1985,6 @@ impl Config { electra_fork_version, fulu_fork_epoch: fulu_fork_epoch.map(|q| q.value), fulu_fork_version, - eip7594_fork_epoch: eip7594_fork_epoch.map(|q| q.value), seconds_per_slot, seconds_per_eth1_block, min_validator_withdrawability_delay, diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index b2a050e9d5..90a914dfae 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -133,20 +133,6 @@ impl DataColumnSidecar { .len() } - pub fn empty() -> Self { - Self { - index: 0, - column: DataColumn::::default(), - kzg_commitments: VariableList::default(), - kzg_proofs: VariableList::default(), - signed_block_header: SignedBeaconBlockHeader { - message: BeaconBlockHeader::empty(), - signature: Signature::empty(), - }, - kzg_commitments_inclusion_proof: Default::default(), - } - } - pub fn id(&self) -> DataColumnIdentifier { DataColumnIdentifier { block_root: self.block_root(), diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 2df66343af..5d756c8529 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -40,7 +40,7 @@ pub type Withdrawals = VariableList::MaxWithdrawal map_ref_into(ExecutionPayloadHeader) )] #[derive( - Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative, arbitrary::Arbitrary, + Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, )] #[derivative(PartialEq, Hash(bound = "E: EthSpec"))] #[serde(bound = "E: EthSpec", untagged)] @@ -102,8 +102,9 @@ impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> { } } -impl ExecutionPayload { - pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { +impl ForkVersionDecode for ExecutionPayload { + /// SSZ decode with explicit fork variant. 
+ fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { match fork_name { ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid(format!( "unsupported fork for ExecutionPayload: {fork_name}", @@ -117,7 +118,9 @@ impl ExecutionPayload { ForkName::Fulu => ExecutionPayloadFulu::from_ssz_bytes(bytes).map(Self::Fulu), } } +} +impl ExecutionPayload { #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. pub fn max_execution_payload_bellatrix_size() -> usize { diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 33f1c51d44..a6360705ba 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -22,61 +22,22 @@ impl ForkContext { genesis_validators_root: Hash256, spec: &ChainSpec, ) -> Self { - let mut fork_to_digest = vec![( - ForkName::Base, - ChainSpec::compute_fork_digest(spec.genesis_fork_version, genesis_validators_root), - )]; - - // Only add Altair to list of forks if it's enabled - // Note: `altair_fork_epoch == None` implies altair hasn't been activated yet on the config. - if spec.altair_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Altair, - ChainSpec::compute_fork_digest(spec.altair_fork_version, genesis_validators_root), - )); - } - - // Only add Bellatrix to list of forks if it's enabled - // Note: `bellatrix_fork_epoch == None` implies bellatrix hasn't been activated yet on the config. 
- if spec.bellatrix_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Bellatrix, - ChainSpec::compute_fork_digest( - spec.bellatrix_fork_version, - genesis_validators_root, - ), - )); - } - - if spec.capella_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Capella, - ChainSpec::compute_fork_digest(spec.capella_fork_version, genesis_validators_root), - )); - } - - if spec.deneb_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Deneb, - ChainSpec::compute_fork_digest(spec.deneb_fork_version, genesis_validators_root), - )); - } - - if spec.electra_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Electra, - ChainSpec::compute_fork_digest(spec.electra_fork_version, genesis_validators_root), - )); - } - - if spec.fulu_fork_epoch.is_some() { - fork_to_digest.push(( - ForkName::Fulu, - ChainSpec::compute_fork_digest(spec.fulu_fork_version, genesis_validators_root), - )); - } - - let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); + let fork_to_digest: HashMap = ForkName::list_all() + .into_iter() + .filter_map(|fork| { + if spec.fork_epoch(fork).is_some() { + Some(( + fork, + ChainSpec::compute_fork_digest( + spec.fork_version_for_name(fork), + genesis_validators_root, + ), + )) + } else { + None + } + }) + .collect(); let digest_to_fork = fork_to_digest .clone() diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index b61e0a4d4a..e92db49485 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -34,14 +34,12 @@ impl ForkName { } pub fn list_all_fork_epochs(spec: &ChainSpec) -> Vec<(ForkName, Option)> { - vec![ - (ForkName::Altair, spec.altair_fork_epoch), - (ForkName::Bellatrix, spec.bellatrix_fork_epoch), - (ForkName::Capella, spec.capella_fork_epoch), - (ForkName::Deneb, spec.deneb_fork_epoch), - (ForkName::Electra, spec.electra_fork_epoch), - (ForkName::Fulu, spec.fulu_fork_epoch), - ] + ForkName::list_all() + .into_iter() + // Skip Base + .skip(1) + 
.map(|fork| (fork, spec.fork_epoch(fork))) + .collect() } pub fn latest() -> ForkName { @@ -49,6 +47,13 @@ impl ForkName { *ForkName::list_all().last().unwrap() } + /// Returns the fork primarily used for testing purposes. + /// This fork serves as the baseline for many tests, and the goal + /// is to ensure features are passing on this fork. + pub fn latest_stable() -> ForkName { + ForkName::Electra + } + /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` /// is the only fork in effect from genesis. pub fn make_genesis_spec(&self, mut spec: ChainSpec) -> ChainSpec { diff --git a/consensus/types/src/fork_versioned_response.rs b/consensus/types/src/fork_versioned_response.rs index cd78b5b3ca..7e4efd05d6 100644 --- a/consensus/types/src/fork_versioned_response.rs +++ b/consensus/types/src/fork_versioned_response.rs @@ -4,6 +4,11 @@ use serde::{Deserialize, Deserializer, Serialize}; use serde_json::value::Value; use std::sync::Arc; +pub trait ForkVersionDecode: Sized { + /// SSZ decode with explicit fork variant. 
+ fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; +} + pub trait ForkVersionDeserialize: Sized + DeserializeOwned { fn deserialize_by_fork<'de, D: Deserializer<'de>>( value: Value, diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 11d1f5271b..73a50b4ef3 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -178,7 +178,9 @@ pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::fork_versioned_response::{ForkVersionDeserialize, ForkVersionedResponse}; +pub use crate::fork_versioned_response::{ + ForkVersionDecode, ForkVersionDeserialize, ForkVersionedResponse, +}; pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::{ diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 9a9915e458..707d2d4697 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -276,21 +276,6 @@ impl ElectraPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct FuluPreset { - #[serde(with = "serde_utils::quoted_u64")] - pub fulu_placeholder: u64, -} - -impl FuluPreset { - pub fn from_chain_spec(spec: &ChainSpec) -> Self { - Self { - fulu_placeholder: spec.fulu_placeholder, - } - } -} - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -#[serde(rename_all = "UPPERCASE")] -pub struct Eip7594Preset { #[serde(with = "serde_utils::quoted_u64")] pub field_elements_per_cell: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -299,7 +284,7 @@ pub struct Eip7594Preset { pub kzg_commitments_inclusion_proof_depth: u64, } -impl Eip7594Preset { +impl FuluPreset { pub fn from_chain_spec(_spec: &ChainSpec) -> Self { Self { field_elements_per_cell: E::field_elements_per_cell() as u64, @@ -357,9 +342,6 @@ mod test { 
let fulu: FuluPreset = preset_from_file(&preset_name, "fulu.yaml"); assert_eq!(fulu, FuluPreset::from_chain_spec::(&spec)); - - let eip7594: Eip7594Preset = preset_from_file(&preset_name, "eip7594.yaml"); - assert_eq!(eip7594, Eip7594Preset::from_chain_spec::(&spec)); } #[test] diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index d9bf9bf55d..eb5925a29b 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -86,6 +86,17 @@ pub struct SignedBeaconBlock = FullP pub signature: Signature, } +impl> ForkVersionDecode + for SignedBeaconBlock +{ + /// SSZ decode with explicit fork variant. + fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { + Self::from_ssz_bytes_with(bytes, |bytes| { + BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) + }) + } +} + pub type SignedBlindedBeaconBlock = SignedBeaconBlock>; impl> SignedBeaconBlock { @@ -108,16 +119,6 @@ impl> SignedBeaconBlock Self::from_ssz_bytes_with(bytes, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) } - /// SSZ decode with explicit fork variant. - pub fn from_ssz_bytes_for_fork( - bytes: &[u8], - fork_name: ForkName, - ) -> Result { - Self::from_ssz_bytes_with(bytes, |bytes| { - BeaconBlock::from_ssz_bytes_for_fork(bytes, fork_name) - }) - } - /// SSZ decode which attempts to decode all variants (slow). 
pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { Self::from_ssz_bytes_with(bytes, BeaconBlock::any_from_ssz_bytes) diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 981d6d5653..7a5357c6cc 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -67,7 +67,7 @@ impl SubnetId { ) -> Result { Self::compute_subnet::( attestation.data.slot, - attestation.committee_index as u64, + attestation.committee_index, committee_count_per_slot, spec, ) diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index c348c3e8be..9bae770fe5 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -1,7 +1,6 @@ use super::{AggregateSignature, EthSpec, SignedRoot}; use crate::slot_data::SlotData; use crate::{test_utils::TestRandom, BitVector, Hash256, Slot, SyncCommitteeMessage}; -use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -11,7 +10,6 @@ use tree_hash_derive::TreeHash; pub enum Error { SszTypesError(ssz_types::Error), AlreadySigned(usize), - SubnetCountIsZero(ArithError), } /// An aggregation of `SyncCommitteeMessage`s, used in creating a `SignedContributionAndProof`. 
diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 222b9292a2..5aed90d2c1 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -56,7 +56,7 @@ impl Validator { }; let max_effective_balance = validator.get_max_effective_balance(spec, fork_name); - // safe math is unnecessary here since the spec.effecive_balance_increment is never <= 0 + // safe math is unnecessary here since the spec.effective_balance_increment is never <= 0 validator.effective_balance = std::cmp::min( amount - (amount % spec.effective_balance_increment), max_effective_balance, @@ -195,7 +195,7 @@ impl Validator { /// Returns `true` if the validator is fully withdrawable at some epoch. /// /// Calls the correct function depending on the provided `fork_name`. - pub fn is_fully_withdrawable_at( + pub fn is_fully_withdrawable_validator( &self, balance: u64, epoch: Epoch, @@ -203,14 +203,14 @@ impl Validator { current_fork: ForkName, ) -> bool { if current_fork.electra_enabled() { - self.is_fully_withdrawable_at_electra(balance, epoch, spec) + self.is_fully_withdrawable_validator_electra(balance, epoch, spec) } else { - self.is_fully_withdrawable_at_capella(balance, epoch, spec) + self.is_fully_withdrawable_validator_capella(balance, epoch, spec) } } /// Returns `true` if the validator is fully withdrawable at some epoch. - fn is_fully_withdrawable_at_capella( + fn is_fully_withdrawable_validator_capella( &self, balance: u64, epoch: Epoch, @@ -222,7 +222,7 @@ impl Validator { /// Returns `true` if the validator is fully withdrawable at some epoch. /// /// Modified in electra as part of EIP 7251. 
- fn is_fully_withdrawable_at_electra( + fn is_fully_withdrawable_validator_electra( &self, balance: u64, epoch: Epoch, diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 348ed785af..2a5c6e47f5 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -21,6 +21,9 @@ pub use rust_eth_kzg::{ Cell, CellIndex as CellID, CellRef, TrustedSetup as PeerDASTrustedSetup, }; +// Note: `spec.number_of_columns` is a config and should match `CELLS_PER_EXT_BLOB` - however this +// is a constant in the KZG library - be aware that overriding `number_of_columns` will break KZG +// operations. pub type CellsAndKzgProofs = ([Cell; CELLS_PER_EXT_BLOB], [KzgProof; CELLS_PER_EXT_BLOB]); pub type KzgBlobRef<'a> = &'a [u8; BYTES_PER_BLOB]; diff --git a/database_manager/src/cli.rs b/database_manager/src/cli.rs index 4246a51f89..c62da1206f 100644 --- a/database_manager/src/cli.rs +++ b/database_manager/src/cli.rs @@ -59,13 +59,12 @@ pub struct DatabaseManager { #[clap( long, - global = true, - help = "Prints help information", - action = clap::ArgAction::HelpLong, + value_name = "DATABASE", + help = "Set the database backend to be used by the beacon node.", display_order = 0, - help_heading = FLAG_HEADER + default_value_t = store::config::DatabaseBackend::LevelDb )] - help: Option, + pub backend: store::config::DatabaseBackend, #[clap(subcommand)] pub subcommand: DatabaseManagerSubcommand, diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index fc15e98616..bed90df9df 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -16,10 +16,12 @@ use slog::{info, warn, Logger}; use std::fs; use std::io::Write; use std::path::PathBuf; +use store::KeyValueStore; use store::{ + database::interface::BeaconNodeBackend, errors::Error, metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}, - DBColumn, HotColdDB, KeyValueStore, LevelDB, + DBColumn, HotColdDB, }; use strum::{EnumString, EnumVariantNames}; use types::{BeaconState, EthSpec, Slot}; @@ 
-40,7 +42,7 @@ fn parse_client_config( .clone_from(&database_manager_config.blobs_dir); client_config.store.blob_prune_margin_epochs = database_manager_config.blob_prune_margin_epochs; client_config.store.hierarchy_config = database_manager_config.hierarchy_exponents.clone(); - + client_config.store.backend = database_manager_config.backend; Ok(client_config) } @@ -55,7 +57,7 @@ pub fn display_db_version( let blobs_path = client_config.get_blobs_db_path(); let mut version = CURRENT_SCHEMA_VERSION; - HotColdDB::, LevelDB>::open( + HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, @@ -145,11 +147,14 @@ pub fn inspect_db( let mut num_keys = 0; let sub_db = if inspect_config.freezer { - LevelDB::::open(&cold_path).map_err(|e| format!("Unable to open freezer DB: {e:?}"))? + BeaconNodeBackend::::open(&client_config.store, &cold_path) + .map_err(|e| format!("Unable to open freezer DB: {e:?}"))? } else if inspect_config.blobs_db { - LevelDB::::open(&blobs_path).map_err(|e| format!("Unable to open blobs DB: {e:?}"))? + BeaconNodeBackend::::open(&client_config.store, &blobs_path) + .map_err(|e| format!("Unable to open blobs DB: {e:?}"))? } else { - LevelDB::::open(&hot_path).map_err(|e| format!("Unable to open hot DB: {e:?}"))? + BeaconNodeBackend::::open(&client_config.store, &hot_path) + .map_err(|e| format!("Unable to open hot DB: {e:?}"))? 
}; let skip = inspect_config.skip.unwrap_or(0); @@ -263,11 +268,20 @@ pub fn compact_db( let column = compact_config.column; let (sub_db, db_name) = if compact_config.freezer { - (LevelDB::::open(&cold_path)?, "freezer_db") + ( + BeaconNodeBackend::::open(&client_config.store, &cold_path)?, + "freezer_db", + ) } else if compact_config.blobs_db { - (LevelDB::::open(&blobs_path)?, "blobs_db") + ( + BeaconNodeBackend::::open(&client_config.store, &blobs_path)?, + "blobs_db", + ) } else { - (LevelDB::::open(&hot_path)?, "hot_db") + ( + BeaconNodeBackend::::open(&client_config.store, &hot_path)?, + "hot_db", + ) }; info!( log, @@ -303,7 +317,7 @@ pub fn migrate_db( let mut from = CURRENT_SCHEMA_VERSION; let to = migrate_config.to; - let db = HotColdDB::, LevelDB>::open( + let db = HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, @@ -343,7 +357,7 @@ pub fn prune_payloads( let cold_path = client_config.get_freezer_db_path(); let blobs_path = client_config.get_blobs_db_path(); - let db = HotColdDB::, LevelDB>::open( + let db = HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, @@ -369,7 +383,7 @@ pub fn prune_blobs( let cold_path = client_config.get_freezer_db_path(); let blobs_path = client_config.get_blobs_db_path(); - let db = HotColdDB::, LevelDB>::open( + let db = HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, @@ -406,7 +420,7 @@ pub fn prune_states( let cold_path = client_config.get_freezer_db_path(); let blobs_path = client_config.get_blobs_db_path(); - let db = HotColdDB::, LevelDB>::open( + let db = HotColdDB::, BeaconNodeBackend>::open( &hot_path, &cold_path, &blobs_path, diff --git a/lcli/Dockerfile b/lcli/Dockerfile index d2cb6f6f14..67bc290112 100644 --- a/lcli/Dockerfile +++ b/lcli/Dockerfile @@ -1,7 +1,7 @@ # `lcli` requires the full project to be in scope, so this should be built either: # - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerflie .` # - from 
the current directory with the command: `docker build -f ./Dockerfile ../` -FROM rust:1.80.0-bullseye AS builder +FROM rust:1.84.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev COPY . lighthouse ARG FEATURES diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index eda9a2ebf2..c95735d41c 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -4,10 +4,10 @@ version = "6.0.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false -rust-version = "1.80.0" +rust-version = "1.83.0" [features] -default = ["slasher-lmdb"] +default = ["slasher-lmdb", "beacon-node-leveldb"] # Writes debugging .ssz files to /tmp during block processing. write_ssz_files = ["beacon_node/write_ssz_files"] # Compiles the BLS crypto code so that the binary is portable across machines. @@ -24,6 +24,11 @@ slasher-mdbx = ["slasher/mdbx"] slasher-lmdb = ["slasher/lmdb"] # Support slasher redb backend. slasher-redb = ["slasher/redb"] +# Supports beacon node leveldb backend. +beacon-node-leveldb = ["store/leveldb"] +# Supports beacon node redb backend. +beacon-node-redb = ["store/redb"] + # Deprecated. This is now enabled by default on non windows targets. 
jemalloc = [] @@ -56,6 +61,7 @@ serde_json = { workspace = true } serde_yaml = { workspace = true } slasher = { workspace = true } slog = { workspace = true } +store = { workspace = true } task_executor = { workspace = true } types = { workspace = true } unused_port = { workspace = true } diff --git a/lighthouse/src/cli.rs b/lighthouse/src/cli.rs index 90d3e811eb..ed665d2a47 100644 --- a/lighthouse/src/cli.rs +++ b/lighthouse/src/cli.rs @@ -1,9 +1,12 @@ use clap::Parser; use database_manager::cli::DatabaseManager; use serde::{Deserialize, Serialize}; +use validator_client::cli::ValidatorClient; #[derive(Parser, Clone, Deserialize, Serialize, Debug)] pub enum LighthouseSubcommands { #[clap(name = "database_manager")] - DatabaseManager(DatabaseManager), + DatabaseManager(Box), + #[clap(name = "validator_client")] + ValidatorClient(Box), } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 43c5e1107c..d7a14e3809 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -66,11 +66,15 @@ fn bls_hardware_acceleration() -> bool { return std::arch::is_aarch64_feature_detected!("neon"); } -fn allocator_name() -> &'static str { - if cfg!(target_os = "windows") { - "system" - } else { - "jemalloc" +fn allocator_name() -> String { + #[cfg(target_os = "windows")] + { + "system".to_string() + } + #[cfg(not(target_os = "windows"))] + match malloc_utils::jemalloc::page_size() { + Ok(page_size) => format!("jemalloc ({}K)", page_size / 1024), + Err(e) => format!("jemalloc (error: {e:?})"), } } @@ -395,10 +399,10 @@ fn main() { .action(ArgAction::HelpLong) .display_order(0) .help_heading(FLAG_HEADER) + .global(true) ) .subcommand(beacon_node::cli_app()) .subcommand(boot_node::cli_app()) - .subcommand(validator_client::cli_app()) .subcommand(account_manager::cli_app()) .subcommand(validator_manager::cli_app()); @@ -669,12 +673,49 @@ fn run( return Ok(()); } - if let Ok(LighthouseSubcommands::DatabaseManager(db_manager_config)) = - 
LighthouseSubcommands::from_arg_matches(matches) - { - info!(log, "Running database manager for {} network", network_name); - database_manager::run(matches, &db_manager_config, environment)?; - return Ok(()); + match LighthouseSubcommands::from_arg_matches(matches) { + Ok(LighthouseSubcommands::DatabaseManager(db_manager_config)) => { + info!(log, "Running database manager for {} network", network_name); + database_manager::run(matches, &db_manager_config, environment)?; + return Ok(()); + } + Ok(LighthouseSubcommands::ValidatorClient(validator_client_config)) => { + let context = environment.core_context(); + let log = context.log().clone(); + let executor = context.executor.clone(); + let config = validator_client::Config::from_cli( + matches, + &validator_client_config, + context.log(), + ) + .map_err(|e| format!("Unable to initialize validator config: {}", e))?; + // Dump configs if `dump-config` or `dump-chain-config` flags are set + clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; + + let shutdown_flag = matches.get_flag("immediate-shutdown"); + if shutdown_flag { + info!(log, "Validator client immediate shutdown triggered."); + return Ok(()); + } + + executor.clone().spawn( + async move { + if let Err(e) = ProductionValidatorClient::new(context, config) + .and_then(|mut vc| async move { vc.start_service().await }) + .await + { + crit!(log, "Failed to start validator client"; "reason" => e); + // Ignore the error since it always occurs during normal operation when + // shutting down. 
+ let _ = executor + .shutdown_sender() + .try_send(ShutdownReason::Failure("Failed to start validator client")); + } + }, + "validator_client", + ); + } + Err(_) => (), }; info!(log, "Lighthouse started"; "version" => VERSION); @@ -729,38 +770,9 @@ fn run( "beacon_node", ); } - Some(("validator_client", matches)) => { - let context = environment.core_context(); - let log = context.log().clone(); - let executor = context.executor.clone(); - let config = validator_client::Config::from_cli(matches, context.log()) - .map_err(|e| format!("Unable to initialize validator config: {}", e))?; - // Dump configs if `dump-config` or `dump-chain-config` flags are set - clap_utils::check_dump_configs::<_, E>(matches, &config, &context.eth2_config.spec)?; - - let shutdown_flag = matches.get_flag("immediate-shutdown"); - if shutdown_flag { - info!(log, "Validator client immediate shutdown triggered."); - return Ok(()); - } - - executor.clone().spawn( - async move { - if let Err(e) = ProductionValidatorClient::new(context, config) - .and_then(|mut vc| async move { vc.start_service().await }) - .await - { - crit!(log, "Failed to start validator client"; "reason" => e); - // Ignore the error since it always occurs during normal operation when - // shutting down. - let _ = executor - .shutdown_sender() - .try_send(ShutdownReason::Failure("Failed to start validator client")); - } - }, - "validator_client", - ); - } + // TODO(clap-derive) delete this once we've fully migrated to clap derive. + // At the moment this needs to exist so that we don't trigger a crit. + Some(("validator_client", _)) => (), _ => { crit!(log, "No subcommand supplied. 
See --help ."); return Err("No subcommand supplied.".into()); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 88e05dfa12..03314930b9 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1,11 +1,12 @@ -use beacon_node::ClientConfig as Config; - use crate::exec::{CommandLineTestExec, CompletedTest}; use beacon_node::beacon_chain::chain_config::{ DisallowedReOrgOffsets, DEFAULT_RE_ORG_CUTOFF_DENOMINATOR, DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, }; -use beacon_node::beacon_chain::graffiti_calculator::GraffitiOrigin; +use beacon_node::{ + beacon_chain::graffiti_calculator::GraffitiOrigin, + beacon_chain::store::config::DatabaseBackend as BeaconNodeBackend, ClientConfig as Config, +}; use beacon_processor::BeaconProcessorConfig; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; @@ -2503,9 +2504,9 @@ fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert!(!config.network.enable_light_client_server); - assert!(!config.chain.enable_light_client_server); - assert!(!config.http_api.enable_light_client_server); + assert!(config.network.enable_light_client_server); + assert!(config.chain.enable_light_client_server); + assert!(config.http_api.enable_light_client_server); }); } @@ -2521,13 +2522,26 @@ fn light_client_server_enabled() { } #[test] -fn light_client_http_server_enabled() { +fn light_client_server_disabled() { CommandLineTest::new() - .flag("http", None) - .flag("light-client-server", None) + .flag("disable-light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert!(config.http_api.enable_light_client_server); + assert!(!config.network.enable_light_client_server); + assert!(!config.chain.enable_light_client_server); + }); +} + +#[test] +fn light_client_http_server_disabled() { + CommandLineTest::new() + .flag("http", None) + .flag("disable-light-client-server", None) + 
.run_with_zero_port() + .with_config(|config| { + assert!(!config.http_api.enable_light_client_server); + assert!(!config.network.enable_light_client_server); + assert!(!config.chain.enable_light_client_server); }); } @@ -2691,3 +2705,13 @@ fn genesis_state_url_value() { assert_eq!(config.genesis_state_url_timeout, Duration::from_secs(42)); }); } + +#[test] +fn beacon_node_backend_override() { + CommandLineTest::new() + .flag("beacon-node-backend", Some("leveldb")) + .run_with_zero_port() + .with_config(|config| { + assert_eq!(config.store.backend, BeaconNodeBackend::LevelDb); + }); +} diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 1945399c86..f28e7d9829 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -407,6 +407,13 @@ fn metrics_port_flag() { .with_config(|config| assert_eq!(config.http_metrics.listen_port, 9090)); } #[test] +fn metrics_port_flag_default() { + CommandLineTest::new() + .flag("metrics", None) + .run() + .with_config(|config| assert_eq!(config.http_metrics.listen_port, 5064)); +} +#[test] fn metrics_allow_origin_flag() { CommandLineTest::new() .flag("metrics", None) @@ -458,7 +465,7 @@ fn no_doppelganger_protection_flag() { fn no_gas_limit_flag() { CommandLineTest::new() .run() - .with_config(|config| assert!(config.validator_store.gas_limit.is_none())); + .with_config(|config| assert!(config.validator_store.gas_limit == Some(30_000_000))); } #[test] fn gas_limit_flag() { @@ -560,7 +567,7 @@ fn broadcast_flag() { }); // Other valid variants CommandLineTest::new() - .flag("broadcast", Some("blocks, subscriptions")) + .flag("broadcast", Some("blocks,subscriptions")) .run() .with_config(|config| { assert_eq!( @@ -605,7 +612,7 @@ fn beacon_nodes_sync_tolerances_flag() { } #[test] -#[should_panic(expected = "Unknown API topic")] +#[should_panic(expected = "invalid value")] fn wrong_broadcast_flag() { CommandLineTest::new() .flag("broadcast", Some("foo, 
subscriptions")) diff --git a/scripts/ci/check-lockbud.sh b/scripts/ci/check-lockbud.sh new file mode 100755 index 0000000000..8e1d33b53b --- /dev/null +++ b/scripts/ci/check-lockbud.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Run lockbud to check for deadlocks and capture the output +output=$(cargo lockbud -k deadlock -b -l tokio_util 2>&1) + +# Check if lockbud returned any issues +if echo "$output" | grep -q '"bug_kind"'; then + # Print the JSON payload + echo "Lockbud detected issues:" + echo "$output" + + # Exit with a non-zero status to indicate an error + exit 1 +else + echo "No issues detected by Lockbud." + exit 0 +fi \ No newline at end of file diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml index ab2f07a24e..030aa2b820 100644 --- a/scripts/local_testnet/network_params_das.yaml +++ b/scripts/local_testnet/network_params_das.yaml @@ -3,6 +3,7 @@ participants: cl_image: lighthouse:local cl_extra_params: - --subscribe-all-data-column-subnets + - --subscribe-all-subnets - --target-peers=3 count: 2 - cl_type: lighthouse @@ -11,11 +12,14 @@ participants: - --target-peers=3 count: 2 network_params: - eip7594_fork_epoch: 0 + electra_fork_epoch: 1 + fulu_fork_epoch: 2 seconds_per_slot: 6 snooper_enabled: false global_log_level: debug additional_services: - dora - - goomy_blob + - spamoor_blob - prometheus_grafana +dora_params: + image: ethpandaops/dora:fulu-support \ No newline at end of file diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 7108e3e8f6..c32a670e9a 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-beta.0 +TESTS_TAG := v1.5.0-beta.2 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index bf9e5d6cfa..8a662b72e3 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ 
b/testing/ef_tests/check_all_files_accessed.py @@ -49,11 +49,9 @@ excluded_paths = [ "bls12-381-tests/hash_to_G2", "tests/.*/eip6110", "tests/.*/whisk", - "tests/.*/eip7594", - # Fulu tests are not yet being run + # TODO(das): Fulu tests are ignored for now "tests/.*/fulu", - # TODO(electra): SingleAttestation tests are waiting on Eitan's PR - "tests/.*/electra/ssz_static/SingleAttestation" + "tests/.*/fulu/ssz_static/MatrixEntry", ] diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 54a142a96b..4a202ee3d2 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -91,6 +91,9 @@ pub use transition::TransitionTest; /// to return `true` for the feature in order for the feature test vector to be tested. #[derive(Debug, PartialEq, Clone, Copy)] pub enum FeatureName { + // TODO(fulu): to be removed once we start using Fulu types for test vectors. + // Existing SSZ types for PeerDAS (Fulu) are the same as Electra, so the test vectors get + // loaded as Electra types (default serde behaviour for untagged enums). 
Fulu, } diff --git a/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs b/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs index 1d0bf951bc..8a6330d399 100644 --- a/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs +++ b/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs @@ -21,12 +21,8 @@ impl LoadCase for ComputeColumnsForCustodyGroups { } impl Case for ComputeColumnsForCustodyGroups { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/get_custody_groups.rs b/testing/ef_tests/src/cases/get_custody_groups.rs index f8c4370aeb..1c1294305f 100644 --- a/testing/ef_tests/src/cases/get_custody_groups.rs +++ b/testing/ef_tests/src/cases/get_custody_groups.rs @@ -24,12 +24,8 @@ impl LoadCase for GetCustodyGroups { } impl Case for GetCustodyGroups { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs index 8df43bb267..6ab9a8db65 100644 --- a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs @@ -26,12 +26,8 @@ impl LoadCase for KZGComputeCellsAndKZGProofs { } impl Case for KZGComputeCellsAndKZGProofs { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn 
is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs index 26ab4e96b5..732cb54f31 100644 --- a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs @@ -27,12 +27,8 @@ impl LoadCase for KZGRecoverCellsAndKZGProofs { } impl Case for KZGRecoverCellsAndKZGProofs { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs index fc625063b1..e3edc0df0a 100644 --- a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs @@ -29,12 +29,8 @@ impl LoadCase for KZGVerifyCellKZGProofBatch { } impl Case for KZGVerifyCellKZGProofBatch { - fn is_enabled_for_fork(_fork_name: ForkName) -> bool { - false - } - - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Fulu + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.fulu_enabled() } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index adb5bee768..7178edb151 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ 
b/testing/ef_tests/src/cases/operations.rs @@ -25,9 +25,9 @@ use std::fmt::Debug; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconState, - BlindedPayload, ConsolidationRequest, Deposit, DepositRequest, ExecutionPayload, FullPayload, - ProposerSlashing, SignedBlsToExecutionChange, SignedVoluntaryExit, SyncAggregate, - WithdrawalRequest, + BlindedPayload, ConsolidationRequest, Deposit, DepositRequest, ExecutionPayload, + ForkVersionDecode, FullPayload, ProposerSlashing, SignedBlsToExecutionChange, + SignedVoluntaryExit, SyncAggregate, WithdrawalRequest, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -398,7 +398,7 @@ impl Operation for WithdrawalsPayload { fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { ssz_decode_file_with(path, |bytes| { - ExecutionPayload::from_ssz_bytes(bytes, fork_name) + ExecutionPayload::from_ssz_bytes_by_fork(bytes, fork_name) }) .map(|payload| WithdrawalsPayload { payload: payload.into(), diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 6c0165efab..481c9b2169 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -355,11 +355,14 @@ where } fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - // This ensures we only run the tests **once** for the feature, using the types matching the - // correct fork, e.g. `Fulu` uses SSZ types from `Electra` fork as of spec test version - // `v1.5.0-beta.0`, therefore the `Fulu` tests should get included when testing Electra types. + // TODO(fulu): to be removed once Fulu types start differing from Electra. We currently run Fulu tests as a + // "feature" - this means we use Electra types for Fulu SSZ tests (except for PeerDAS types, e.g. `DataColumnSidecar`). // - // e.g. 
Fulu test vectors are executed in the first line below, but excluded in the 2nd + // This ensures we only run the tests **once** for `Fulu`, using the types matching the + // correct fork, e.g. `Fulu` uses SSZ types from `Electra` as of spec test version + // `v1.5.0-beta.0`, therefore the `Fulu` tests should get included when testing Deneb types. + // + // e.g. Fulu test vectors are executed in the 2nd line below, but excluded in the 1st // line when testing the type `AttestationElectra`: // // ``` @@ -677,6 +680,11 @@ impl Handler for ForkChoiceHandler { return false; } + // Deposit tests exist only after Electra. + if self.handler_name == "deposit_with_reorg" && !fork_name.electra_enabled() { + return false; + } + // These tests check block validity (which may include signatures) and there is no need to // run them with fake crypto. cfg!(not(feature = "fake_crypto")) @@ -890,6 +898,10 @@ impl Handler for GetCustodyGroupsHandler { fn handler_name(&self) -> String { "get_custody_groups".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } #[derive(Derivative)] @@ -910,6 +922,10 @@ impl Handler for ComputeColumnsForCustodyGroupHandler fn handler_name(&self) -> String { "compute_columns_for_custody_group".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } #[derive(Derivative)] @@ -930,6 +946,10 @@ impl Handler for KZGComputeCellsAndKZGProofHandler { fn handler_name(&self) -> String { "compute_cells_and_kzg_proofs".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } #[derive(Derivative)] @@ -950,6 +970,10 @@ impl Handler for KZGVerifyCellKZGProofBatchHandler { fn handler_name(&self) -> String { "verify_cell_kzg_proof_batch".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } 
#[derive(Derivative)] @@ -970,6 +994,10 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { fn handler_name(&self) -> String { "recover_cells_and_kzg_proofs".into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } } #[derive(Derivative)] diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index c50032a63d..dfee385958 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -54,6 +54,7 @@ type_name_generic!(BeaconBlockBodyBellatrix, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyCapella, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyDeneb, "BeaconBlockBody"); type_name_generic!(BeaconBlockBodyElectra, "BeaconBlockBody"); +type_name_generic!(BeaconBlockBodyFulu, "BeaconBlockBody"); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(BlobIdentifier); @@ -74,12 +75,14 @@ type_name_generic!(ExecutionPayloadBellatrix, "ExecutionPayload"); type_name_generic!(ExecutionPayloadCapella, "ExecutionPayload"); type_name_generic!(ExecutionPayloadDeneb, "ExecutionPayload"); type_name_generic!(ExecutionPayloadElectra, "ExecutionPayload"); +type_name_generic!(ExecutionPayloadFulu, "ExecutionPayload"); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); type_name_generic!(ExecutionPayloadHeaderBellatrix, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderCapella, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderDeneb, "ExecutionPayloadHeader"); type_name_generic!(ExecutionPayloadHeaderElectra, "ExecutionPayloadHeader"); +type_name_generic!(ExecutionPayloadHeaderFulu, "ExecutionPayloadHeader"); type_name_generic!(ExecutionRequests); type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); @@ -93,6 +96,7 @@ type_name_generic!(LightClientBootstrapAltair, "LightClientBootstrap"); 
type_name_generic!(LightClientBootstrapCapella, "LightClientBootstrap"); type_name_generic!(LightClientBootstrapDeneb, "LightClientBootstrap"); type_name_generic!(LightClientBootstrapElectra, "LightClientBootstrap"); +type_name_generic!(LightClientBootstrapFulu, "LightClientBootstrap"); type_name_generic!(LightClientFinalityUpdate); type_name_generic!(LightClientFinalityUpdateAltair, "LightClientFinalityUpdate"); type_name_generic!( @@ -104,11 +108,13 @@ type_name_generic!( LightClientFinalityUpdateElectra, "LightClientFinalityUpdate" ); +type_name_generic!(LightClientFinalityUpdateFulu, "LightClientFinalityUpdate"); type_name_generic!(LightClientHeader); type_name_generic!(LightClientHeaderAltair, "LightClientHeader"); type_name_generic!(LightClientHeaderCapella, "LightClientHeader"); type_name_generic!(LightClientHeaderDeneb, "LightClientHeader"); type_name_generic!(LightClientHeaderElectra, "LightClientHeader"); +type_name_generic!(LightClientHeaderFulu, "LightClientHeader"); type_name_generic!(LightClientOptimisticUpdate); type_name_generic!( LightClientOptimisticUpdateAltair, @@ -126,11 +132,16 @@ type_name_generic!( LightClientOptimisticUpdateElectra, "LightClientOptimisticUpdate" ); +type_name_generic!( + LightClientOptimisticUpdateFulu, + "LightClientOptimisticUpdate" +); type_name_generic!(LightClientUpdate); type_name_generic!(LightClientUpdateAltair, "LightClientUpdate"); type_name_generic!(LightClientUpdateCapella, "LightClientUpdate"); type_name_generic!(LightClientUpdateDeneb, "LightClientUpdate"); type_name_generic!(LightClientUpdateElectra, "LightClientUpdate"); +type_name_generic!(LightClientUpdateFulu, "LightClientUpdate"); type_name_generic!(PendingAttestation); type_name!(PendingConsolidation); type_name!(PendingPartialWithdrawal); @@ -144,6 +155,7 @@ type_name!(SignedBeaconBlockHeader); type_name_generic!(SignedContributionAndProof); type_name!(SignedVoluntaryExit); type_name!(SigningData); +type_name!(SingleAttestation); 
type_name_generic!(SyncCommitteeContribution); type_name!(SyncCommitteeMessage); type_name!(SyncAggregatorSelectionData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 61581128d4..1f5a7dd997 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -276,21 +276,27 @@ mod ssz_static { fn attestation() { SszStaticHandler::, MinimalEthSpec>::pre_electra().run(); SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); - SszStaticHandler::, MinimalEthSpec>::electra_only() + SszStaticHandler::, MinimalEthSpec>::electra_and_later() .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only() + SszStaticHandler::, MainnetEthSpec>::electra_and_later() .run(); } + #[test] + fn single_attestation() { + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + } + #[test] fn attester_slashing() { SszStaticHandler::, MinimalEthSpec>::pre_electra() .run(); SszStaticHandler::, MainnetEthSpec>::pre_electra() .run(); - SszStaticHandler::, MinimalEthSpec>::electra_only() + SszStaticHandler::, MinimalEthSpec>::electra_and_later() .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only() + SszStaticHandler::, MainnetEthSpec>::electra_and_later() .run(); } @@ -300,9 +306,9 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::pre_electra() .run(); - SszStaticHandler::, MinimalEthSpec>::electra_only() + SszStaticHandler::, MinimalEthSpec>::electra_and_later() .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only() + SszStaticHandler::, MainnetEthSpec>::electra_and_later() .run(); } @@ -314,10 +320,10 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::pre_electra( ) .run(); - SszStaticHandler::, MinimalEthSpec>::electra_only( + SszStaticHandler::, MinimalEthSpec>::electra_and_later( ) .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only( + SszStaticHandler::, MainnetEthSpec>::electra_and_later( ) .run(); } @@ -328,10 +334,10 @@ mod ssz_static { 
.run(); SszStaticHandler::, MainnetEthSpec>::pre_electra() .run(); - SszStaticHandler::, MinimalEthSpec>::electra_only( + SszStaticHandler::, MinimalEthSpec>::electra_and_later( ) .run(); - SszStaticHandler::, MainnetEthSpec>::electra_only( + SszStaticHandler::, MainnetEthSpec>::electra_and_later( ) .run(); } @@ -361,6 +367,8 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::electra_only() .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only().run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only().run(); } // Altair and later @@ -399,6 +407,10 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::electra_only() .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only() + .run(); } // LightClientHeader has no internal indicator of which fork it is for, so we test it separately. @@ -430,6 +442,10 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::electra_only( ) .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only() + .run(); } // LightClientOptimisticUpdate has no internal indicator of which fork it is for, so we test it separately. @@ -445,6 +461,8 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only().run(); SszStaticHandler::, MinimalEthSpec>::electra_only().run(); SszStaticHandler::, MainnetEthSpec>::electra_only().run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only().run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only().run(); } // LightClientFinalityUpdate has no internal indicator of which fork it is for, so we test it separately. @@ -480,6 +498,12 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::electra_only( ) .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only( + ) + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only( + ) + .run(); } // LightClientUpdate has no internal indicator of which fork it is for, so we test it separately. 
@@ -509,6 +533,10 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::electra_only( ) .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only() + .run(); } #[test] @@ -566,6 +594,8 @@ mod ssz_static { .run(); SszStaticHandler::, MainnetEthSpec>::electra_only() .run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only().run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only().run(); } #[test] @@ -586,6 +616,10 @@ mod ssz_static { ::electra_only().run(); SszStaticHandler::, MainnetEthSpec> ::electra_only().run(); + SszStaticHandler::, MinimalEthSpec>::fulu_only() + .run(); + SszStaticHandler::, MainnetEthSpec>::fulu_only() + .run(); } #[test] @@ -626,17 +660,17 @@ mod ssz_static { #[test] fn data_column_sidecar() { - SszStaticHandler::, MinimalEthSpec>::deneb_only() + SszStaticHandler::, MinimalEthSpec>::default() .run_for_feature(FeatureName::Fulu); - SszStaticHandler::, MainnetEthSpec>::deneb_only() + SszStaticHandler::, MainnetEthSpec>::default() .run_for_feature(FeatureName::Fulu); } #[test] fn data_column_identifier() { - SszStaticHandler::::deneb_only() + SszStaticHandler::::default() .run_for_feature(FeatureName::Fulu); - SszStaticHandler::::deneb_only() + SszStaticHandler::::default() .run_for_feature(FeatureName::Fulu); } @@ -852,6 +886,12 @@ fn fork_choice_get_proposer_head() { ForkChoiceHandler::::new("get_proposer_head").run(); } +#[test] +fn fork_choice_deposit_with_reorg() { + ForkChoiceHandler::::new("deposit_with_reorg").run(); + // There is no mainnet variant for this test. 
+} + #[test] fn optimistic_sync() { OptimisticSyncHandler::::default().run(); @@ -901,20 +941,17 @@ fn kzg_verify_kzg_proof() { #[test] fn kzg_compute_cells_and_proofs() { - KZGComputeCellsAndKZGProofHandler::::default() - .run_for_feature(FeatureName::Fulu); + KZGComputeCellsAndKZGProofHandler::::default().run(); } #[test] fn kzg_verify_cell_proof_batch() { - KZGVerifyCellKZGProofBatchHandler::::default() - .run_for_feature(FeatureName::Fulu); + KZGVerifyCellKZGProofBatchHandler::::default().run(); } #[test] fn kzg_recover_cells_and_proofs() { - KZGRecoverCellsAndKZGProofHandler::::default() - .run_for_feature(FeatureName::Fulu); + KZGRecoverCellsAndKZGProofHandler::::default().run(); } #[test] @@ -949,14 +986,12 @@ fn rewards() { #[test] fn get_custody_groups() { - GetCustodyGroupsHandler::::default().run_for_feature(FeatureName::Fulu); - GetCustodyGroupsHandler::::default().run_for_feature(FeatureName::Fulu); + GetCustodyGroupsHandler::::default().run(); + GetCustodyGroupsHandler::::default().run() } #[test] fn compute_columns_for_custody_group() { - ComputeColumnsForCustodyGroupHandler::::default() - .run_for_feature(FeatureName::Fulu); - ComputeColumnsForCustodyGroupHandler::::default() - .run_for_feature(FeatureName::Fulu); + ComputeColumnsForCustodyGroupHandler::::default().run(); + ComputeColumnsForCustodyGroupHandler::::default().run(); } diff --git a/testing/execution_engine_integration/src/geth.rs b/testing/execution_engine_integration/src/geth.rs index 0bd96a5c93..ea143ed433 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -7,7 +7,10 @@ use std::{env, fs}; use tempfile::TempDir; use unused_port::unused_tcp4_port; -const GETH_BRANCH: &str = "master"; +// This is not currently used due to the following breaking changes in geth that requires updating our tests: +// 1. removal of `personal` namespace in v1.14.12: See #30704 +// 2. removal of `totalDifficulty` field from RPC in v1.14.11. 
See #30386. +// const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { @@ -27,12 +30,14 @@ pub fn build(execution_clients_dir: &Path) { } // Get the latest tag on the branch - let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); - build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); + // let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + // Using an older release due to breaking changes in recent releases. See comment on `GETH_BRANCH` const. + let release_tag = "v1.14.10"; + build_utils::checkout(&repo_dir, dbg!(release_tag)).unwrap(); // Build geth build_utils::check_command_output(build_result(&repo_dir), || { - format!("geth make failed using release {last_release}") + format!("geth make failed using release {release_tag}") }); } diff --git a/testing/validator_test_rig/Cargo.toml b/testing/validator_test_rig/Cargo.toml new file mode 100644 index 0000000000..76560b8afc --- /dev/null +++ b/testing/validator_test_rig/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "validator_test_rig" +version = "0.1.0" +edition = { workspace = true } + +[dependencies] +eth2 = { workspace = true } +logging = { workspace = true } +mockito = { workspace = true } +regex = { workspace = true } +sensitive_url = { workspace = true } +serde_json = { workspace = true } +slog = { workspace = true } +types = { workspace = true } diff --git a/testing/validator_test_rig/src/lib.rs b/testing/validator_test_rig/src/lib.rs new file mode 100644 index 0000000000..a0a979dfc8 --- /dev/null +++ b/testing/validator_test_rig/src/lib.rs @@ -0,0 +1 @@ +pub mod mock_beacon_node; diff --git a/testing/validator_test_rig/src/mock_beacon_node.rs b/testing/validator_test_rig/src/mock_beacon_node.rs new file mode 100644 index 0000000000..f875116155 --- /dev/null +++ b/testing/validator_test_rig/src/mock_beacon_node.rs @@ -0,0 +1,132 @@ 
+use eth2::types::{GenericResponse, SyncingData}; +use eth2::{BeaconNodeHttpClient, StatusCode, Timeouts}; +use logging::test_logger; +use mockito::{Matcher, Mock, Server, ServerGuard}; +use regex::Regex; +use sensitive_url::SensitiveUrl; +use slog::{info, Logger}; +use std::marker::PhantomData; +use std::str::FromStr; +use std::sync::{Arc, Mutex}; +use std::time::Duration; +use types::{ChainSpec, ConfigAndPreset, EthSpec, SignedBlindedBeaconBlock}; + +pub struct MockBeaconNode { + server: ServerGuard, + pub beacon_api_client: BeaconNodeHttpClient, + log: Logger, + _phantom: PhantomData, + pub received_blocks: Arc>>>, +} + +impl MockBeaconNode { + pub async fn new() -> Self { + // mock server logging + let server = Server::new_async().await; + let beacon_api_client = BeaconNodeHttpClient::new( + SensitiveUrl::from_str(&server.url()).unwrap(), + Timeouts::set_all(Duration::from_secs(1)), + ); + let log = test_logger(); + Self { + server, + beacon_api_client, + log, + _phantom: PhantomData, + received_blocks: Arc::new(Mutex::new(Vec::new())), + } + } + + /// Resets all mocks + #[allow(dead_code)] + pub fn reset_mocks(&mut self) { + self.server.reset(); + } + + pub fn mock_config_spec(&mut self, spec: &ChainSpec) { + let path_pattern = Regex::new(r"^/eth/v1/config/spec$").unwrap(); + let config_and_preset = ConfigAndPreset::from_chain_spec::(spec, None); + let data = GenericResponse::from(config_and_preset); + self.server + .mock("GET", Matcher::Regex(path_pattern.to_string())) + .with_status(200) + .with_body(serde_json::to_string(&data).unwrap()) + .create(); + } + + pub fn mock_get_node_syncing(&mut self, response: SyncingData) { + let path_pattern = Regex::new(r"^/eth/v1/node/syncing$").unwrap(); + + let data = GenericResponse::from(response); + + self.server + .mock("GET", Matcher::Regex(path_pattern.to_string())) + .with_status(200) + .with_body(serde_json::to_string(&data).unwrap()) + .create(); + } + + /// Mocks the `post_beacon_blinded_blocks_v2_ssz` response 
with an optional `delay`. + pub fn mock_post_beacon_blinded_blocks_v2_ssz(&mut self, delay: Duration) -> Mock { + let path_pattern = Regex::new(r"^/eth/v2/beacon/blinded_blocks$").unwrap(); + let log = self.log.clone(); + let url = self.server.url(); + + let received_blocks = Arc::clone(&self.received_blocks); + + self.server + .mock("POST", Matcher::Regex(path_pattern.to_string())) + .match_header("content-type", "application/octet-stream") + .with_status(200) + .with_body_from_request(move |request| { + info!( + log, + "{}", + format!( + "Received published block request on server {} with delay {} s", + url, + delay.as_secs(), + ) + ); + + let body = request.body().expect("Failed to get request body"); + let block: SignedBlindedBeaconBlock = + SignedBlindedBeaconBlock::any_from_ssz_bytes(body) + .expect("Failed to deserialize body as SignedBlindedBeaconBlock"); + + received_blocks.lock().unwrap().push(block); + + std::thread::sleep(delay); + vec![] + }) + .create() + } + + pub fn mock_offline_node(&mut self) -> Mock { + let path_pattern = Regex::new(r"^/eth/v1/node/version$").unwrap(); + + self.server + .mock("GET", Matcher::Regex(path_pattern.to_string())) + .with_status(StatusCode::INTERNAL_SERVER_ERROR.as_u16() as usize) + .with_header("content-type", "application/json") + .with_body(r#"{"message":"Internal Server Error"}"#) + .create() + } + + pub fn mock_online_node(&mut self) -> Mock { + let path_pattern = Regex::new(r"^/eth/v1/node/version$").unwrap(); + + self.server + .mock("GET", Matcher::Regex(path_pattern.to_string())) + .with_status(200) + .with_header("content-type", "application/json") + .with_body( + r#"{ + "data": { + "version": "lighthouse-mock" + } + }"#, + ) + .create() + } +} diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 8f945dc0d2..9b4887b478 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -8,9 +8,6 @@ edition = { workspace = true } name = "validator_client" path = "src/lib.rs" 
-[dev-dependencies] -tokio = { workspace = true } - [dependencies] account_utils = { workspace = true } beacon_node_fallback = { workspace = true } diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml index a871beb03b..ccf2d650a6 100644 --- a/validator_client/beacon_node_fallback/Cargo.toml +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -9,6 +9,7 @@ name = "beacon_node_fallback" path = "src/lib.rs" [dependencies] +clap = { workspace = true } eth2 = { workspace = true } futures = { workspace = true } itertools = { workspace = true } @@ -20,3 +21,7 @@ tokio = { workspace = true } tracing = { workspace = true } types = { workspace = true } validator_metrics = { workspace = true } + +[dev-dependencies] +logging = { workspace = true } +validator_test_rig = { workspace = true } diff --git a/validator_client/beacon_node_fallback/src/beacon_node_health.rs b/validator_client/beacon_node_fallback/src/beacon_node_health.rs index 33ebc659b3..1b5d5b98cb 100644 --- a/validator_client/beacon_node_fallback/src/beacon_node_health.rs +++ b/validator_client/beacon_node_fallback/src/beacon_node_health.rs @@ -1,10 +1,8 @@ use super::CandidateError; use eth2::BeaconNodeHttpClient; -use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::cmp::Ordering; use std::fmt::{Debug, Display, Formatter}; -use std::str::FromStr; use tracing::warn; use types::Slot; @@ -53,29 +51,6 @@ impl Default for BeaconNodeSyncDistanceTiers { } } -impl FromStr for BeaconNodeSyncDistanceTiers { - type Err = String; - - fn from_str(s: &str) -> Result { - let values: (u64, u64, u64) = s - .split(',') - .map(|s| { - s.parse() - .map_err(|e| format!("Invalid sync distance modifier: {e:?}")) - }) - .collect::, _>>()? 
- .into_iter() - .collect_tuple() - .ok_or("Invalid number of sync distance modifiers".to_string())?; - - Ok(BeaconNodeSyncDistanceTiers { - synced: Slot::new(values.0), - small: Slot::new(values.0 + values.1), - medium: Slot::new(values.0 + values.1 + values.2), - }) - } -} - impl BeaconNodeSyncDistanceTiers { /// Takes a given sync distance and determines its tier based on the `sync_tolerance` defined by /// the CLI. @@ -90,6 +65,17 @@ impl BeaconNodeSyncDistanceTiers { SyncDistanceTier::Large } } + + pub fn from_vec(tiers: &[u64]) -> Result { + if tiers.len() != 3 { + return Err("Invalid number of sync distance modifiers".to_string()); + } + Ok(BeaconNodeSyncDistanceTiers { + synced: Slot::new(tiers[0]), + small: Slot::new(tiers[0] + tiers[1]), + medium: Slot::new(tiers[0] + tiers[1] + tiers[2]), + }) + } } /// Execution Node health metrics. @@ -318,7 +304,6 @@ mod tests { SyncDistanceTier, }; use crate::Config; - use std::str::FromStr; use types::Slot; #[test] @@ -421,7 +406,7 @@ mod tests { // medium 9..=12 // large: 13.. 
- let distance_tiers = BeaconNodeSyncDistanceTiers::from_str("4,4,4").unwrap(); + let distance_tiers = BeaconNodeSyncDistanceTiers::from_vec(&[4, 4, 4]).unwrap(); let synced_low = new_distance_tier(0, &distance_tiers); let synced_high = new_distance_tier(4, &distance_tiers); diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 48985f3256..9cdac02389 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -7,6 +7,7 @@ use beacon_node_health::{ check_node_health, BeaconNodeHealth, BeaconNodeSyncDistanceTiers, ExecutionEngineHealth, IsOptimistic, SyncDistanceTier, }; +use clap::ValueEnum; use eth2::BeaconNodeHttpClient; use futures::future; use serde::{ser::SerializeStruct, Deserialize, Serialize, Serializer}; @@ -17,7 +18,8 @@ use std::fmt::Debug; use std::future::Future; use std::sync::Arc; use std::time::{Duration, Instant}; -use strum::{EnumString, EnumVariantNames}; +use std::vec::Vec; +use strum::EnumVariantNames; use task_executor::TaskExecutor; use tokio::{sync::RwLock, time::sleep}; use tracing::{debug, error, warn}; @@ -695,9 +697,10 @@ async fn sort_nodes_by_health(nodes: &mut Vec) { } /// Serves as a cue for `BeaconNodeFallback` to tell which requests need to be broadcasted. 
-#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, EnumString, EnumVariantNames)] +#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, EnumVariantNames, ValueEnum)] #[strum(serialize_all = "kebab-case")] pub enum ApiTopic { + None, Attestations, Blocks, Subscriptions, @@ -717,23 +720,30 @@ mod tests { use crate::beacon_node_health::BeaconNodeHealthTier; use eth2::SensitiveUrl; use eth2::Timeouts; - use std::str::FromStr; + use slot_clock::TestingSlotClock; use strum::VariantNames; - use types::Slot; + use types::{BeaconBlockDeneb, MainnetEthSpec, Slot}; + use types::{EmptyBlock, Signature, SignedBeaconBlockDeneb, SignedBlindedBeaconBlock}; + use validator_test_rig::mock_beacon_node::MockBeaconNode; + + type E = MainnetEthSpec; #[test] fn api_topic_all() { let all = ApiTopic::all(); - assert_eq!(all.len(), ApiTopic::VARIANTS.len()); - assert!(ApiTopic::VARIANTS + // ignore NONE variant + let mut variants = ApiTopic::VARIANTS.to_vec(); + variants.retain(|s| *s != "none"); + assert_eq!(all.len(), variants.len()); + assert!(variants .iter() - .map(|topic| ApiTopic::from_str(topic).unwrap()) + .map(|topic| ApiTopic::from_str(topic, true).unwrap()) .eq(all.into_iter())); } #[tokio::test] async fn check_candidate_order() { - // These fields is irrelvant for sorting. They are set to arbitrary values. + // These fields are irrelevant for sorting. They are set to arbitrary values. 
let head = Slot::new(99); let optimistic_status = IsOptimistic::No; let execution_status = ExecutionEngineHealth::Healthy; @@ -841,4 +851,168 @@ mod tests { assert_eq!(candidates, expected_candidates); } + + async fn new_mock_beacon_node( + index: usize, + spec: &ChainSpec, + ) -> (MockBeaconNode, CandidateBeaconNode) { + let mut mock_beacon_node = MockBeaconNode::new().await; + mock_beacon_node.mock_config_spec(spec); + + let beacon_node = + CandidateBeaconNode::new(mock_beacon_node.beacon_api_client.clone(), index); + + (mock_beacon_node, beacon_node) + } + + fn create_beacon_node_fallback( + candidates: Vec, + topics: Vec, + spec: Arc, + ) -> BeaconNodeFallback { + let mut beacon_node_fallback = + BeaconNodeFallback::new(candidates, Config::default(), topics, spec); + + beacon_node_fallback.set_slot_clock(TestingSlotClock::new( + Slot::new(1), + Duration::from_secs(0), + Duration::from_secs(12), + )); + + beacon_node_fallback + } + + #[tokio::test] + async fn update_all_candidates_should_update_sync_status() { + let spec = Arc::new(MainnetEthSpec::default_spec()); + let (mut mock_beacon_node_1, beacon_node_1) = new_mock_beacon_node(0, &spec).await; + let (mut mock_beacon_node_2, beacon_node_2) = new_mock_beacon_node(1, &spec).await; + let (mut mock_beacon_node_3, beacon_node_3) = new_mock_beacon_node(2, &spec).await; + + let beacon_node_fallback = create_beacon_node_fallback( + // Put this out of order to be sorted later + vec![ + beacon_node_2.clone(), + beacon_node_3.clone(), + beacon_node_1.clone(), + ], + vec![], + spec.clone(), + ); + + // BeaconNodeHealthTier 1 + mock_beacon_node_1.mock_get_node_syncing(eth2::types::SyncingData { + is_syncing: false, + is_optimistic: false, + el_offline: false, + head_slot: Slot::new(1), + sync_distance: Slot::new(0), + }); + // BeaconNodeHealthTier 3 + mock_beacon_node_2.mock_get_node_syncing(eth2::types::SyncingData { + is_syncing: false, + is_optimistic: false, + el_offline: true, + head_slot: Slot::new(1), + 
sync_distance: Slot::new(0), + }); + // BeaconNodeHealthTier 5 + mock_beacon_node_3.mock_get_node_syncing(eth2::types::SyncingData { + is_syncing: false, + is_optimistic: true, + el_offline: false, + head_slot: Slot::new(1), + sync_distance: Slot::new(0), + }); + + beacon_node_fallback.update_all_candidates::().await; + + let candidates = beacon_node_fallback.candidates.read().await; + assert_eq!( + vec![beacon_node_1, beacon_node_2, beacon_node_3], + *candidates + ); + } + + #[tokio::test] + async fn broadcast_should_send_to_all_bns() { + let spec = Arc::new(MainnetEthSpec::default_spec()); + let (mut mock_beacon_node_1, beacon_node_1) = new_mock_beacon_node(0, &spec).await; + let (mut mock_beacon_node_2, beacon_node_2) = new_mock_beacon_node(1, &spec).await; + + let beacon_node_fallback = create_beacon_node_fallback( + vec![beacon_node_1, beacon_node_2], + vec![ApiTopic::Blocks], + spec.clone(), + ); + + mock_beacon_node_1.mock_post_beacon_blinded_blocks_v2_ssz(Duration::from_secs(0)); + mock_beacon_node_2.mock_post_beacon_blinded_blocks_v2_ssz(Duration::from_secs(0)); + + let signed_block = SignedBlindedBeaconBlock::::Deneb(SignedBeaconBlockDeneb { + message: BeaconBlockDeneb::empty(&spec), + signature: Signature::empty(), + }); + + // trigger broadcast to `post_beacon_blinded_blocks_v2` + let result = beacon_node_fallback + .broadcast(|client| { + let signed_block_cloned = signed_block.clone(); + async move { + client + .post_beacon_blinded_blocks_v2_ssz(&signed_block_cloned, None) + .await + } + }) + .await; + + assert!(result.is_ok()); + + let received_blocks_from_bn_1 = mock_beacon_node_1.received_blocks.lock().unwrap(); + let received_blocks_from_bn_2 = mock_beacon_node_2.received_blocks.lock().unwrap(); + assert_eq!(received_blocks_from_bn_1.len(), 1); + assert_eq!(received_blocks_from_bn_2.len(), 1); + } + + #[tokio::test] + async fn first_success_should_try_nodes_in_order() { + let spec = Arc::new(MainnetEthSpec::default_spec()); + let (mut 
mock_beacon_node_1, beacon_node_1) = new_mock_beacon_node(0, &spec).await; + let (mut mock_beacon_node_2, beacon_node_2) = new_mock_beacon_node(1, &spec).await; + let (mut mock_beacon_node_3, beacon_node_3) = new_mock_beacon_node(2, &spec).await; + + let beacon_node_fallback = create_beacon_node_fallback( + vec![beacon_node_1, beacon_node_2, beacon_node_3], + vec![], + spec.clone(), + ); + + let mock1 = mock_beacon_node_1.mock_offline_node(); + let mock2 = mock_beacon_node_2.mock_offline_node(); + let mock3 = mock_beacon_node_3.mock_online_node(); + + let result_success = beacon_node_fallback + .first_success(|client| async move { client.get_node_version().await }) + .await; + + // mock3 expects to be called once since it is online in the first pass + mock3.expect(1).assert(); + assert!(result_success.is_ok()); + + // make all beacon node offline and the result should error + let _mock3 = mock_beacon_node_3.mock_offline_node(); + + let result_failure = beacon_node_fallback + .first_success(|client| async move { client.get_node_version().await }) + .await; + + assert!(result_failure.is_err()); + + // Both mock1 and mock2 should be called 3 times: + // - the first time is for the result_success case, + // - the second time is when it calls all 3 mock beacon nodes and all fails in the first pass, + // - which gives the third call because the function gives a second pass if no candidates succeeded in the first pass + mock1.expect(3).assert(); + mock2.expect(3).assert(); + } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index b2d1ebb3c2..dfcd2064e5 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -1,490 +1,478 @@ -use clap::{builder::ArgPredicate, Arg, ArgAction, Command}; -use clap_utils::{get_color_style, FLAG_HEADER}; +use beacon_node_fallback::ApiTopic; +use clap::builder::ArgPredicate; +pub use clap::{FromArgMatches, Parser}; +use clap_utils::get_color_style; +use clap_utils::FLAG_HEADER; +use 
serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use types::Address; -pub fn cli_app() -> Command { - Command::new("validator_client") - .visible_aliases(["v", "vc", "validator"]) - .styles(get_color_style()) - .display_order(0) - .about( - "When connected to a beacon node, performs the duties of a staked \ +#[derive(Parser, Clone, Deserialize, Serialize, Debug)] +#[clap( + name = "validator_client", + visible_aliases = &["v", "vc", "validator"], + about = "When connected to a beacon node, performs the duties of a staked \ validator (e.g., proposing blocks and attestations).", - ) - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - ) - .arg( - Arg::new("beacon-nodes") - .long("beacon-nodes") - .value_name("NETWORK_ADDRESSES") - .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ - Default is http://localhost:5052." - ) - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("proposer-nodes") - .long("proposer-nodes") - .value_name("NETWORK_ADDRESSES") - .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ - These specify nodes that are used to send beacon block proposals. A failure will revert back to the standard beacon nodes specified in --beacon-nodes." - ) - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("broadcast") - .long("broadcast") - .value_name("API_TOPICS") - .help("Comma-separated list of beacon API topics to broadcast to all beacon nodes. \ - Possible values are: none, attestations, blocks, subscriptions, \ - sync-committee. Default (when flag is omitted) is to broadcast \ - subscriptions only." 
- ) - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("validators-dir") - .long("validators-dir") - .alias("validator-dir") - .value_name("VALIDATORS_DIR") - .help( - "The directory which contains the validator keystores, deposit data for \ - each validator along with the common slashing protection database \ - and the validator_definitions.yml" - ) - .action(ArgAction::Set) - .conflicts_with("datadir") - .display_order(0) - ) - .arg( - Arg::new("secrets-dir") - .long("secrets-dir") - .value_name("SECRETS_DIRECTORY") - .help( - "The directory which contains the password to unlock the validator \ - voting keypairs. Each password should be contained in a file where the \ - name is the 0x-prefixed hex representation of the validators voting public \ - key. Defaults to ~/.lighthouse/{network}/secrets.", - ) - .action(ArgAction::Set) - .conflicts_with("datadir") - .display_order(0) - ) - .arg( - Arg::new("init-slashing-protection") - .long("init-slashing-protection") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .help( - "If present, do not require the slashing protection database to exist before \ - running. You SHOULD NOT use this flag unless you're certain that a new \ - slashing protection database is required. Usually, your database \ - will have been initialized when you imported your validator keys. If you \ - misplace your database and then run with this flag you risk being slashed." - ) - .display_order(0) - ) - .arg( - Arg::new("disable-auto-discover") - .long("disable-auto-discover") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .help( - "If present, do not attempt to discover new validators in the validators-dir. Validators \ - will need to be manually added to the validator_definitions.yml file." 
- ) - .display_order(0) - ) - .arg( - Arg::new("use-long-timeouts") - .long("use-long-timeouts") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .help("If present, the validator client will use longer timeouts for requests \ - made to the beacon node. This flag is generally not recommended, \ - longer timeouts can cause missed duties when fallbacks are used.") - .display_order(0) - ) - .arg( - Arg::new("beacon-nodes-tls-certs") - .long("beacon-nodes-tls-certs") - .value_name("CERTIFICATE-FILES") - .action(ArgAction::Set) - .help("Comma-separated paths to custom TLS certificates to use when connecting \ - to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ - in addition to the OS trust store. Commas must only be used as a \ - delimiter, and must not be part of the certificate path.") - .display_order(0) - ) - // This overwrites the graffiti configured in the beacon node. - .arg( - Arg::new("graffiti") - .long("graffiti") - .help("Specify your custom graffiti to be included in blocks.") - .value_name("GRAFFITI") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("graffiti-file") - .long("graffiti-file") - .help("Specify a graffiti file to load validator graffitis from.") - .value_name("GRAFFITI-FILE") - .action(ArgAction::Set) - .conflicts_with("graffiti") - .display_order(0) - ) - .arg( - Arg::new("suggested-fee-recipient") - .long("suggested-fee-recipient") - .help("Once the merge has happened, this address will receive transaction fees \ - from blocks proposed by this validator client. 
If a fee recipient is \ - configured in the validator definitions it takes priority over this value.") - .value_name("FEE-RECIPIENT") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("distributed") - .long("distributed") - .help("Enables functionality required for running the validator in a distributed validator cluster.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - /* REST API related arguments */ - .arg( - Arg::new("http") - .long("http") - .help("Enable the RESTful HTTP API server. Disabled by default.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - /* - * Note: The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is - * unsafe to publish on a public network. - * - * If the `--http-address` flag is used, the `--unencrypted-http-transport` flag - * must also be used in order to make it clear to the user that this is unsafe. - */ - .arg( - Arg::new("http-address") - .long("http-address") - .requires("http") - .value_name("ADDRESS") - .help("Set the address for the HTTP address. The HTTP server is not encrypted \ - and therefore it is unsafe to publish on a public network. When this \ - flag is used, it additionally requires the explicit use of the \ - `--unencrypted-http-transport` flag to ensure the user is aware of the \ - risks involved. 
For access via the Internet, users should apply \ - transport-layer security like a HTTPS reverse-proxy or SSH tunnelling.") - .requires("unencrypted-http-transport") - .display_order(0) - ) - .arg( - Arg::new("unencrypted-http-transport") - .long("unencrypted-http-transport") - .help("This is a safety flag to ensure that the user is aware that the http \ - transport is unencrypted and using a custom HTTP address is unsafe.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .requires("http-address") - .display_order(0) - ) - .arg( - Arg::new("http-port") - .long("http-port") - .requires("http") - .value_name("PORT") - .help("Set the listen TCP port for the RESTful HTTP API server.") - .default_value_if("http", ArgPredicate::IsPresent, "5062") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("http-allow-origin") - .long("http-allow-origin") - .requires("http") - .value_name("ORIGIN") - .help("Set the value of the Access-Control-Allow-Origin response HTTP header. \ + styles = get_color_style(), + next_line_help = true, + term_width = 80, + disable_help_flag = true, + disable_help_subcommand = true, + display_order = 0, +)] +pub struct ValidatorClient { + #[clap( + long, + value_name = "NETWORK_ADDRESSES", + value_delimiter = ',', + help = "Comma-separated addresses to one or more beacon node HTTP APIs. \ + Default is http://localhost:5052.", + display_order = 0 + )] + pub beacon_nodes: Option>, + + #[clap( + long, + value_name = "NETWORK_ADDRESSES", + value_delimiter = ',', + help = "Comma-separated addresses to one or more beacon node HTTP APIs. \ + These specify nodes that are used to send beacon block proposals. \ + A failure will revert back to the standard beacon nodes specified in --beacon-nodes.", + display_order = 0 + )] + pub proposer_nodes: Option>, + + #[clap( + long, + value_name = "API_TOPICS", + value_delimiter = ',', + help = "Comma-separated list of beacon API topics to broadcast to all beacon nodes. 
\ + Default (when flag is omitted) is to broadcast subscriptions only.", + display_order = 0 + )] + pub broadcast: Option>, + + #[clap( + long, + alias = "validator-dir", + value_name = "VALIDATORS_DIR", + conflicts_with = "datadir", + help = "The directory which contains the validator keystores, deposit data for \ + each validator along with the common slashing protection database \ + and the validator_definitions.yml", + display_order = 0 + )] + pub validators_dir: Option, + + #[clap( + long, + value_name = "SECRETS_DIRECTORY", + conflicts_with = "datadir", + help = "The directory which contains the password to unlock the validator \ + voting keypairs. Each password should be contained in a file where the \ + name is the 0x-prefixed hex representation of the validators voting public \ + key. Defaults to ~/.lighthouse/{network}/secrets.", + display_order = 0 + )] + pub secrets_dir: Option, + + #[clap( + long, + help = "If present, do not require the slashing protection database to exist before \ + running. You SHOULD NOT use this flag unless you're certain that a new \ + slashing protection database is required. Usually, your database \ + will have been initialized when you imported your validator keys. If you \ + misplace your database and then run with this flag you risk being slashed.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub init_slashing_protection: bool, + + #[clap( + long, + help = "If present, do not attempt to discover new validators in the validators-dir. Validators \ + will need to be manually added to the validator_definitions.yml file.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub disable_auto_discover: bool, + + #[clap( + long, + help = "If present, the validator client will use longer timeouts for requests \ + made to the beacon node. 
This flag is generally not recommended, \ + longer timeouts can cause missed duties when fallbacks are used.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub use_long_timeouts: bool, + + #[clap( + long, + value_name = "CERTIFICATE-FILES", + value_delimiter = ',', + help = "Comma-separated paths to custom TLS certificates to use when connecting \ + to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ + in addition to the OS trust store. Commas must only be used as a \ + delimiter, and must not be part of the certificate path.", + display_order = 0 + )] + pub beacon_nodes_tls_certs: Option>, + + // This overwrites the graffiti configured in the beacon node. + #[clap( + long, + value_name = "GRAFFITI", + help = "Specify your custom graffiti to be included in blocks.", + display_order = 0 + )] + pub graffiti: Option, + + #[clap( + long, + value_name = "GRAFFITI-FILE", + conflicts_with = "graffiti", + help = "Specify a graffiti file to load validator graffitis from.", + display_order = 0 + )] + pub graffiti_file: Option, + + #[clap( + long, + value_name = "FEE-RECIPIENT", + help = "Once the merge has happened, this address will receive transaction fees \ + from blocks proposed by this validator client. If a fee recipient is \ + configured in the validator definitions it takes priority over this value.", + display_order = 0 + )] + pub suggested_fee_recipient: Option
, + + #[clap( + long, + help = "Enables functionality required for running the validator in a distributed validator cluster.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub distributed: bool, + + /* REST API related arguments */ + #[clap( + long, + help = "Enable the RESTful HTTP API server. Disabled by default.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub http: bool, + + /* + * Note: The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is + * unsafe to publish on a public network. + * + * If the `--http-address` flag is used, the `--unencrypted-http-transport` flag + * must also be used in order to make it clear to the user that this is unsafe. + */ + #[clap( + long, + value_name = "ADDRESS", + requires = "unencrypted_http_transport", + help = "Set the address for the HTTP address. The HTTP server is not encrypted \ + and therefore it is unsafe to publish on a public network. When this \ + flag is used, it additionally requires the explicit use of the \ + `--unencrypted-http-transport` flag to ensure the user is aware of the \ + risks involved. For access via the Internet, users should apply \ + transport-layer security like a HTTPS reverse-proxy or SSH tunnelling.", + display_order = 0 + )] + pub http_address: Option, + + #[clap( + long, + requires = "http_address", + help = "This is a safety flag to ensure that the user is aware that the http \ + transport is unencrypted and using a custom HTTP address is unsafe.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub unencrypted_http_transport: bool, + + #[clap( + long, + value_name = "PORT", + default_value_t = 5062, + help = "Set the listen TCP port for the RESTful HTTP API server.", + display_order = 0 + )] + pub http_port: u16, + + #[clap( + long, + value_name = "ORIGIN", + help = "Set the value of the Access-Control-Allow-Origin response HTTP header. \ Use * to allow any origin (not recommended in production). 
\ If no value is supplied, the CORS allowed origin is set to the listen \ - address of this server (e.g., http://localhost:5062).") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("http-allow-keystore-export") - .long("http-allow-keystore-export") - .requires("http") - .help("If present, allow access to the DELETE /lighthouse/keystores HTTP \ - API method, which allows exporting keystores and passwords to HTTP API \ - consumers who have access to the API token. This method is useful for \ - exporting validators, however it should be used with caution since it \ - exposes private key data to authorized users.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("http-store-passwords-in-secrets-dir") - .long("http-store-passwords-in-secrets-dir") - .requires("http") - .help("If present, any validators created via the HTTP will have keystore \ - passwords stored in the secrets-dir rather than the validator \ - definitions file.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("http-token-path") - .long("http-token-path") - .requires("http") - .value_name("HTTP_TOKEN_PATH") - .help( - "Path to file containing the HTTP API token for validator client authentication. \ - If not specified, defaults to {validators-dir}/api-token.txt." - ) - .action(ArgAction::Set) - .display_order(0) - ) - /* Prometheus metrics HTTP server related arguments */ - .arg( - Arg::new("metrics") - .long("metrics") - .help("Enable the Prometheus metrics HTTP server. 
Disabled by default.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("metrics-address") - .long("metrics-address") - .requires("metrics") - .value_name("ADDRESS") - .help("Set the listen address for the Prometheus metrics HTTP server.") - .default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("metrics-port") - .long("metrics-port") - .requires("metrics") - .value_name("PORT") - .help("Set the listen TCP port for the Prometheus metrics HTTP server.") - .default_value_if("metrics", ArgPredicate::IsPresent, "5064") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("metrics-allow-origin") - .long("metrics-allow-origin") - .requires("metrics") - .value_name("ORIGIN") - .help("Set the value of the Access-Control-Allow-Origin response HTTP header. \ - Use * to allow any origin (not recommended in production). \ - If no value is supplied, the CORS allowed origin is set to the listen \ - address of this server (e.g., http://localhost:5064).") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("enable-high-validator-count-metrics") - .long("enable-high-validator-count-metrics") - .help("Enable per validator metrics for > 64 validators. \ - Note: This flag is automatically enabled for <= 64 validators. \ - Enabling this metric for higher validator counts will lead to higher volume \ - of prometheus metrics being collected.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - /* - * Explorer metrics - */ - .arg( - Arg::new("monitoring-endpoint") - .long("monitoring-endpoint") - .value_name("ADDRESS") - .help("Enables the monitoring service for sending system metrics to a remote endpoint. 
\ + address of this server (e.g., http://localhost:5062).", + display_order = 0 + )] + pub http_allow_origin: Option, + + #[clap( + long, + requires = "http", + help = "If present, allow access to the DELETE /lighthouse/keystores HTTP \ + API method, which allows exporting keystores and passwords to HTTP API \ + consumers who have access to the API token. This method is useful for \ + exporting validators, however it should be used with caution since it \ + exposes private key data to authorized users.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub http_allow_keystore_export: bool, + + #[clap( + long, + requires = "http", + help = "If present, any validators created via the HTTP will have keystore \ + passwords stored in the secrets-dir rather than the validator \ + definitions file.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub http_store_passwords_in_secrets_dir: bool, + + #[clap( + long, + requires = "http", + help = "Path to file containing the HTTP API token for validator client authentication. \ + If not specified, defaults to {validators-dir}/api-token.txt.", + display_order = 0 + )] + pub http_token_path: Option, + + /* Prometheus metrics HTTP server related arguments */ + #[clap( + long, + help = "Enable the Prometheus metrics HTTP server. Disabled by default.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub metrics: bool, + + #[clap( + long, + value_name = "ADDRESS", + requires = "metrics", + default_value_if("metrics", ArgPredicate::IsPresent, "127.0.0.1"), + help = "Set the listen address for the Prometheus metrics HTTP server. 
[default: 127.0.0.1]", + display_order = 0 + )] + pub metrics_address: Option, + + #[clap( + long, + value_name = "PORT", + requires = "metrics", + default_value_t = 5064, + help = "Set the listen TCP port for the Prometheus metrics HTTP server.", + display_order = 0 + )] + pub metrics_port: u16, + + #[clap( + long, + value_name = "ORIGIN", + requires = "metrics", + help = "Set the value of the Access-Control-Allow-Origin response HTTP header. \ + Use * to allow any origin (not recommended in production). \ + If no value is supplied, the CORS allowed origin is set to the listen \ + address of this server (e.g., http://localhost:5064).", + display_order = 0 + )] + pub metrics_allow_origin: Option, + + #[clap( + long, + help = "Enable per validator metrics for > 64 validators. \ + Note: This flag is automatically enabled for <= 64 validators. \ + Enabling this metric for higher validator counts will lead to higher volume \ + of prometheus metrics being collected.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub enable_high_validator_count_metrics: bool, + + /* Explorer metrics */ + #[clap( + long, + value_name = "ADDRESS", + help = "Enables the monitoring service for sending system metrics to a remote endpoint. \ This can be used to monitor your setup on certain services (e.g. beaconcha.in). \ This flag sets the endpoint where the beacon node metrics will be sent. \ Note: This will send information to a remote sever which may identify and associate your \ validators, IP address and other personal information. Always use a HTTPS connection \ - and never provide an untrusted URL.") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("monitoring-endpoint-period") - .long("monitoring-endpoint-period") - .value_name("SECONDS") - .help("Defines how many seconds to wait between each message sent to \ - the monitoring-endpoint. 
Default: 60s") - .requires("monitoring-endpoint") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("enable-doppelganger-protection") - .long("enable-doppelganger-protection") - .value_name("ENABLE_DOPPELGANGER_PROTECTION") - .help("If this flag is set, Lighthouse will delay startup for three epochs and \ - monitor for messages on the network by any of the validators managed by this \ - client. This will result in three (possibly four) epochs worth of missed \ - attestations. If an attestation is detected during this period, it means it is \ - very likely that you are running a second validator client with the same keys. \ - This validator client will immediately shutdown if this is detected in order \ - to avoid potentially committing a slashable offense. Use this flag in order to \ - ENABLE this functionality, without this flag Lighthouse will begin attesting \ - immediately.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("builder-proposals") - .long("builder-proposals") - .alias("private-tx-proposals") - .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ - headers during proposals and will sign over headers. Useful for outsourcing \ - execution payload construction during proposals.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("builder-registration-timestamp-override") - .long("builder-registration-timestamp-override") - .alias("builder-registration-timestamp-override") - .help("This flag takes a unix timestamp value that will be used to override the \ - timestamp used in the builder api registration") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("gas-limit") - .long("gas-limit") - .value_name("INTEGER") - .action(ArgAction::Set) - .help("The gas limit to be used in all builder proposals for all validators managed \ - by this validator client. 
Note this will not necessarily be used if the gas limit \ - set here moves too far from the previous block's gas limit. [default: 30,000,000]") - .requires("builder-proposals") - .display_order(0) - ) - .arg( - Arg::new("disable-latency-measurement-service") - .long("disable-latency-measurement-service") - .help("Disables the service that periodically attempts to measure latency to BNs.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("validator-registration-batch-size") - .long("validator-registration-batch-size") - .value_name("INTEGER") - .help("Defines the number of validators per \ - validator/register_validator request sent to the BN. This value \ - can be reduced to avoid timeouts from builders.") - .default_value("500") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("builder-boost-factor") - .long("builder-boost-factor") - .value_name("UINT64") - .help("Defines the boost factor, \ - a percentage multiplier to apply to the builder's payload value \ - when choosing between a builder payload header and payload from \ - the local execution node.") - .conflicts_with("prefer-builder-proposals") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("prefer-builder-proposals") - .long("prefer-builder-proposals") - .help("If this flag is set, Lighthouse will always prefer blocks \ - constructed by builders, regardless of payload value.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("beacon-nodes-sync-tolerances") - .long("beacon-nodes-sync-tolerances") - .value_name("SYNC_TOLERANCES") - .help("A comma-separated list of 3 values which sets the size of each sync distance range when \ - determining the health of each connected beacon node. \ - The first value determines the `Synced` range. \ - If a connected beacon node is synced to within this number of slots it is considered 'Synced'. 
\ - The second value determines the `Small` sync distance range. \ - This range starts immediately after the `Synced` range. \ - The third value determines the `Medium` sync distance range. \ - This range starts immediately after the `Small` range. \ - Any sync distance value beyond that is considered `Large`. \ - For example, a value of `8,8,48` would have ranges like the following: \ - `Synced`: 0..=8 \ - `Small`: 9..=16 \ - `Medium`: 17..=64 \ - `Large`: 65.. \ - These values are used to determine what ordering beacon node fallbacks are used in. \ - Generally, `Synced` nodes are preferred over `Small` and so on. \ - Nodes in the `Synced` range will tie-break based on their ordering in `--beacon-nodes`. \ - This ensures the primary beacon node is prioritised. \ - [default: 8,8,48]") - .action(ArgAction::Set) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - .arg( - Arg::new("disable-slashing-protection-web3signer") - .long("disable-slashing-protection-web3signer") - .help("Disable Lighthouse's slashing protection for all web3signer keys. This can \ - reduce the I/O burden on the VC but is only safe if slashing protection \ - is enabled on the remote signer and is implemented correctly. DO NOT ENABLE \ - THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON \ - THE REMOTE SIGNER. YOU WILL GET SLASHED IF YOU USE THIS FLAG WITHOUT \ - ENABLING WEB3SIGNER'S SLASHING PROTECTION.") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - ) - /* - * Experimental/development options. - */ - .arg( - Arg::new("web3-signer-keep-alive-timeout") - .long("web3-signer-keep-alive-timeout") - .value_name("MILLIS") - .default_value("20000") - .help("Keep-alive timeout for each web3signer connection. 
Set to 'null' to never \ - timeout") - .action(ArgAction::Set) - .display_order(0) - ) - .arg( - Arg::new("web3-signer-max-idle-connections") - .long("web3-signer-max-idle-connections") - .value_name("COUNT") - .help("Maximum number of idle connections to maintain per web3signer host. Default \ - is unlimited.") - .action(ArgAction::Set) - .display_order(0) - ) + and never provide an untrusted URL.", + display_order = 0 + )] + pub monitoring_endpoint: Option, + + #[clap( + long, + value_name = "SECONDS", + requires = "monitoring_endpoint", + default_value_t = 60, + help = "Defines how many seconds to wait between each message sent to \ + the monitoring-endpoint.", + display_order = 0 + )] + pub monitoring_endpoint_period: u64, + + #[clap( + long, + value_name = "BOOLEAN", + help = "If this flag is set, Lighthouse will delay startup for three epochs and \ + monitor for messages on the network by any of the validators managed by this \ + client. This will result in three (possibly four) epochs worth of missed \ + attestations. If an attestation is detected during this period, it means it is \ + very likely that you are running a second validator client with the same keys. \ + This validator client will immediately shutdown if this is detected in order \ + to avoid potentially committing a slashable offense. Use this flag in order to \ + ENABLE this functionality, without this flag Lighthouse will begin attesting \ + immediately.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub enable_doppelganger_protection: bool, + + #[clap( + long, + alias = "private-tx-proposals", + help = "If this flag is set, Lighthouse will query the Beacon Node for only block \ + headers during proposals and will sign over headers. 
Useful for outsourcing \ + execution payload construction during proposals.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub builder_proposals: bool, + + #[clap( + long, + value_name = "UNIX-TIMESTAMP", + help = "This flag takes a unix timestamp value that will be used to override the \ + timestamp used in the builder api registration.", + display_order = 0 + )] + pub builder_registration_timestamp_override: Option, + + #[clap( + long, + value_name = "INTEGER", + default_value_t = 30_000_000, + requires = "builder_proposals", + help = "The gas limit to be used in all builder proposals for all validators managed \ + by this validator client. Note this will not necessarily be used if the gas limit \ + set here moves too far from the previous block's gas limit.", + display_order = 0 + )] + pub gas_limit: u64, + + #[clap( + long, + value_name = "BOOLEAN", + help = "Disables the service that periodically attempts to measure latency to BNs.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub disable_latency_measurement_service: bool, + + #[clap( + long, + value_name = "INTEGER", + default_value_t = 500, + help = "Defines the number of validators per \ + validator/register_validator request sent to the BN. 
This value \ + can be reduced to avoid timeouts from builders.", + display_order = 0 + )] + pub validator_registration_batch_size: usize, + + #[clap( + long, + value_name = "UINT64", + help = "Defines the boost factor, \ + a percentage multiplier to apply to the builder's payload value \ + when choosing between a builder payload header and payload from \ + the local execution node.", + conflicts_with = "prefer_builder_proposals", + display_order = 0 + )] + pub builder_boost_factor: Option, + + #[clap( + long, + help = "If this flag is set, Lighthouse will always prefer blocks \ + constructed by builders, regardless of payload value.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub prefer_builder_proposals: bool, + + #[clap( + long, + help = "A comma-separated list of 3 values which sets the size of each sync distance range when \ + determining the health of each connected beacon node. \ + The first value determines the `Synced` range. \ + If a connected beacon node is synced to within this number of slots it is considered 'Synced'. \ + The second value determines the `Small` sync distance range. \ + This range starts immediately after the `Synced` range. \ + The third value determines the `Medium` sync distance range. \ + This range starts immediately after the `Small` range. \ + Any sync distance value beyond that is considered `Large`. \ + For example, a value of `8,8,48` would have ranges like the following: \ + `Synced`: 0..=8 \ + `Small`: 9..=16 \ + `Medium`: 17..=64 \ + `Large`: 65.. \ + These values are used to determine what ordering beacon node fallbacks are used in. \ + Generally, `Synced` nodes are preferred over `Small` and so on. \ + Nodes in the `Synced` range will tie-break based on their ordering in `--beacon-nodes`. 
\ + This ensures the primary beacon node is prioritised.", + display_order = 0, + value_delimiter = ',', + default_value = "8,8,48", + help_heading = FLAG_HEADER, + value_name = "SYNC_TOLERANCES" + )] + pub beacon_nodes_sync_tolerances: Vec, + + #[clap( + long, + help = "Disable Lighthouse's slashing protection for all web3signer keys. This can \ + reduce the I/O burden on the VC but is only safe if slashing protection \ + is enabled on the remote signer and is implemented correctly. DO NOT ENABLE \ + THIS FLAG UNLESS YOU ARE CERTAIN THAT SLASHING PROTECTION IS ENABLED ON \ + THE REMOTE SIGNER. YOU WILL GET SLASHED IF YOU USE THIS FLAG WITHOUT \ + ENABLING WEB3SIGNER'S SLASHING PROTECTION.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub disable_slashing_protection_web3signer: bool, + + /* Experimental/development options */ + #[clap( + long, + value_name = "MILLIS", + default_value_t = 20000, + help = "Keep-alive timeout for each web3signer connection. Set to '0' to never \ + timeout.", + display_order = 0 + )] + pub web3_signer_keep_alive_timeout: u64, + + #[clap( + long, + value_name = "COUNT", + help = "Maximum number of idle connections to maintain per web3signer host. 
Default \ + is unlimited.", + display_order = 0 + )] + pub web3_signer_max_idle_connections: Option, } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 5ea4c23bc4..20fa3ffe5a 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,6 +1,8 @@ -use beacon_node_fallback::{beacon_node_health::BeaconNodeSyncDistanceTiers, ApiTopic}; +use crate::cli::ValidatorClient; +use beacon_node_fallback::beacon_node_health::BeaconNodeSyncDistanceTiers; +use beacon_node_fallback::ApiTopic; use clap::ArgMatches; -use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_optional, parse_required}; +use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, parse_required}; use directory::{ get_network_dir, DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, DEFAULT_VALIDATOR_DIR, @@ -15,9 +17,8 @@ use slog::{info, warn, Logger}; use std::fs; use std::net::IpAddr; use std::path::PathBuf; -use std::str::FromStr; use std::time::Duration; -use types::{Address, GRAFFITI_BYTES_LEN}; +use types::GRAFFITI_BYTES_LEN; use validator_http_api::{self, PK_FILENAME}; use validator_http_metrics; @@ -132,7 +133,11 @@ impl Default for Config { impl Config { /// Returns a `Default` implementation of `Self` with some parameters modified by the supplied /// `cli_args`. 
- pub fn from_cli(cli_args: &ArgMatches, log: &Logger) -> Result { + pub fn from_cli( + cli_args: &ArgMatches, + validator_client_config: &ValidatorClient, + log: &Logger, + ) -> Result { let mut config = Config::default(); let default_root_dir = dirs::home_dir() @@ -145,11 +150,12 @@ impl Config { validator_dir = Some(base_dir.join(DEFAULT_VALIDATOR_DIR)); secrets_dir = Some(base_dir.join(DEFAULT_SECRET_DIR)); } - if cli_args.get_one::("validators-dir").is_some() { - validator_dir = Some(parse_required(cli_args, "validators-dir")?); + + if let Some(validator_dir_path) = validator_client_config.validators_dir.as_ref() { + validator_dir = Some(validator_dir_path.clone()); } - if cli_args.get_one::("secrets-dir").is_some() { - secrets_dir = Some(parse_required(cli_args, "secrets-dir")?); + if let Some(secrets_dir_path) = validator_client_config.secrets_dir.as_ref() { + secrets_dir = Some(secrets_dir_path.clone()); } config.validator_dir = validator_dir.unwrap_or_else(|| { @@ -169,35 +175,36 @@ impl Config { .map_err(|e| format!("Failed to create {:?}: {:?}", config.validator_dir, e))?; } - if let Some(beacon_nodes) = parse_optional::(cli_args, "beacon-nodes")? { + if let Some(beacon_nodes) = validator_client_config.beacon_nodes.as_ref() { config.beacon_nodes = beacon_nodes - .split(',') - .map(SensitiveUrl::parse) + .iter() + .map(|s| SensitiveUrl::parse(s)) .collect::>() .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; } - if let Some(proposer_nodes) = parse_optional::(cli_args, "proposer-nodes")? 
{ + + if let Some(proposer_nodes) = validator_client_config.proposer_nodes.as_ref() { config.proposer_nodes = proposer_nodes - .split(',') - .map(SensitiveUrl::parse) + .iter() + .map(|s| SensitiveUrl::parse(s)) .collect::>() .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?; } - config.disable_auto_discover = cli_args.get_flag("disable-auto-discover"); - config.init_slashing_protection = cli_args.get_flag("init-slashing-protection"); - config.use_long_timeouts = cli_args.get_flag("use-long-timeouts"); + config.disable_auto_discover = validator_client_config.disable_auto_discover; + config.init_slashing_protection = validator_client_config.init_slashing_protection; + config.use_long_timeouts = validator_client_config.use_long_timeouts; - if let Some(graffiti_file_path) = cli_args.get_one::("graffiti-file") { + if let Some(graffiti_file_path) = validator_client_config.graffiti_file.as_ref() { let mut graffiti_file = GraffitiFile::new(graffiti_file_path.into()); graffiti_file .read_graffiti_file() .map_err(|e| format!("Error reading graffiti file: {:?}", e))?; config.graffiti_file = Some(graffiti_file); - info!(log, "Successfully loaded graffiti file"; "path" => graffiti_file_path); + info!(log, "Successfully loaded graffiti file"; "path" => graffiti_file_path.to_str()); } - if let Some(input_graffiti) = cli_args.get_one::("graffiti") { + if let Some(input_graffiti) = validator_client_config.graffiti.as_ref() { let graffiti_bytes = input_graffiti.as_bytes(); if graffiti_bytes.len() > GRAFFITI_BYTES_LEN { return Err(format!( @@ -216,55 +223,40 @@ impl Config { } } - if let Some(input_fee_recipient) = - parse_optional::
(cli_args, "suggested-fee-recipient")? - { + if let Some(input_fee_recipient) = validator_client_config.suggested_fee_recipient { config.validator_store.fee_recipient = Some(input_fee_recipient); } - if let Some(tls_certs) = parse_optional::(cli_args, "beacon-nodes-tls-certs")? { - config.beacon_nodes_tls_certs = Some(tls_certs.split(',').map(PathBuf::from).collect()); + if let Some(tls_certs) = validator_client_config.beacon_nodes_tls_certs.as_ref() { + config.beacon_nodes_tls_certs = Some(tls_certs.iter().map(PathBuf::from).collect()); } - if cli_args.get_flag("distributed") { - config.distributed = true; - } + config.distributed = validator_client_config.distributed; - if let Some(broadcast_topics) = cli_args.get_one::("broadcast") { - config.broadcast_topics = broadcast_topics - .split(',') - .filter(|t| *t != "none") - .map(|t| { - t.trim() - .parse::() - .map_err(|_| format!("Unknown API topic to broadcast: {t}")) - }) - .collect::>()?; + if let Some(mut broadcast_topics) = validator_client_config.broadcast.clone() { + broadcast_topics.retain(|topic| *topic != ApiTopic::None); + config.broadcast_topics = broadcast_topics; } /* * Beacon node fallback */ - if let Some(sync_tolerance) = cli_args.get_one::("beacon-nodes-sync-tolerances") { - config.beacon_node_fallback.sync_tolerances = - BeaconNodeSyncDistanceTiers::from_str(sync_tolerance)?; - } else { - config.beacon_node_fallback.sync_tolerances = BeaconNodeSyncDistanceTiers::default(); - } + config.beacon_node_fallback.sync_tolerances = BeaconNodeSyncDistanceTiers::from_vec( + &validator_client_config.beacon_nodes_sync_tolerances, + )?; /* * Web3 signer */ - if let Some(s) = parse_optional::(cli_args, "web3-signer-keep-alive-timeout")? 
{ - config.initialized_validators.web3_signer_keep_alive_timeout = if s == "null" { - None - } else { - Some(Duration::from_millis( - s.parse().map_err(|_| "invalid timeout value".to_string())?, - )) - } + if validator_client_config.web3_signer_keep_alive_timeout == 0 { + config.initialized_validators.web3_signer_keep_alive_timeout = None + } else { + config.initialized_validators.web3_signer_keep_alive_timeout = Some( + Duration::from_millis(validator_client_config.web3_signer_keep_alive_timeout), + ); } - if let Some(n) = parse_optional::(cli_args, "web3-signer-max-idle-connections")? { + + if let Some(n) = validator_client_config.web3_signer_max_idle_connections { config .initialized_validators .web3_signer_max_idle_connections = Some(n); @@ -274,12 +266,10 @@ impl Config { * Http API server */ - if cli_args.get_flag("http") { - config.http_api.enabled = true; - } + config.http_api.enabled = validator_client_config.http; - if let Some(address) = cli_args.get_one::("http-address") { - if cli_args.get_flag("unencrypted-http-transport") { + if let Some(address) = &validator_client_config.http_address { + if validator_client_config.unencrypted_http_transport { config.http_api.listen_addr = address .parse::() .map_err(|_| "http-address is not a valid IP address.")?; @@ -291,13 +281,9 @@ impl Config { } } - if let Some(port) = cli_args.get_one::("http-port") { - config.http_api.listen_port = port - .parse::() - .map_err(|_| "http-port is not a valid u16.")?; - } + config.http_api.listen_port = validator_client_config.http_port; - if let Some(allow_origin) = cli_args.get_one::("http-allow-origin") { + if let Some(allow_origin) = validator_client_config.http_allow_origin.as_ref() { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. 
hyper::header::HeaderValue::from_str(allow_origin) @@ -306,15 +292,11 @@ impl Config { config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.get_flag("http-allow-keystore-export") { - config.http_api.allow_keystore_export = true; - } + config.http_api.allow_keystore_export = validator_client_config.http_allow_keystore_export; + config.http_api.store_passwords_in_secrets_dir = + validator_client_config.http_store_passwords_in_secrets_dir; - if cli_args.get_flag("http-store-passwords-in-secrets-dir") { - config.http_api.store_passwords_in_secrets_dir = true; - } - - if let Some(http_token_path) = cli_args.get_one::("http-token-path") { + if let Some(http_token_path) = &validator_client_config.http_token_path { config.http_api.http_token_path = PathBuf::from(http_token_path); } else { // For backward compatibility, default to the path under the validator dir if not provided. @@ -325,27 +307,19 @@ impl Config { * Prometheus metrics HTTP server */ - if cli_args.get_flag("metrics") { - config.http_metrics.enabled = true; - } + config.http_metrics.enabled = validator_client_config.metrics; + config.enable_high_validator_count_metrics = + validator_client_config.enable_high_validator_count_metrics; - if cli_args.get_flag("enable-high-validator-count-metrics") { - config.enable_high_validator_count_metrics = true; - } - - if let Some(address) = cli_args.get_one::("metrics-address") { - config.http_metrics.listen_addr = address + if let Some(metrics_address) = &validator_client_config.metrics_address { + config.http_metrics.listen_addr = metrics_address .parse::() .map_err(|_| "metrics-address is not a valid IP address.")?; } - if let Some(port) = cli_args.get_one::("metrics-port") { - config.http_metrics.listen_port = port - .parse::() - .map_err(|_| "metrics-port is not a valid u16.")?; - } + config.http_metrics.listen_port = validator_client_config.metrics_port; - if let Some(allow_origin) = cli_args.get_one::("metrics-allow-origin") { + if let 
Some(allow_origin) = validator_client_config.metrics_allow_origin.as_ref() { // Pre-validate the config value to give feedback to the user on node startup, instead of // as late as when the first API response is produced. hyper::header::HeaderValue::from_str(allow_origin) @@ -361,9 +335,8 @@ impl Config { /* * Explorer metrics */ - if let Some(monitoring_endpoint) = cli_args.get_one::("monitoring-endpoint") { - let update_period_secs = - clap_utils::parse_optional(cli_args, "monitoring-endpoint-period")?; + if let Some(monitoring_endpoint) = validator_client_config.monitoring_endpoint.as_ref() { + let update_period_secs = Some(validator_client_config.monitoring_endpoint_period); config.monitoring_api = Some(monitoring_api::Config { db_path: None, freezer_db_path: None, @@ -372,56 +345,34 @@ impl Config { }); } - if cli_args.get_flag("enable-doppelganger-protection") { - config.enable_doppelganger_protection = true; - } + config.enable_doppelganger_protection = + validator_client_config.enable_doppelganger_protection; + config.validator_store.builder_proposals = validator_client_config.builder_proposals; + config.validator_store.prefer_builder_proposals = + validator_client_config.prefer_builder_proposals; + config.validator_store.gas_limit = Some(validator_client_config.gas_limit); - if cli_args.get_flag("builder-proposals") { - config.validator_store.builder_proposals = true; - } - - if cli_args.get_flag("prefer-builder-proposals") { - config.validator_store.prefer_builder_proposals = true; - } - - config.validator_store.gas_limit = cli_args - .get_one::("gas-limit") - .map(|gas_limit| { - gas_limit - .parse::() - .map_err(|_| "gas-limit is not a valid u64.") - }) - .transpose()?; - - if let Some(registration_timestamp_override) = - cli_args.get_one::("builder-registration-timestamp-override") - { - config.builder_registration_timestamp_override = Some( - registration_timestamp_override - .parse::() - .map_err(|_| "builder-registration-timestamp-override is not a 
valid u64.")?, - ); - } - - config.validator_store.builder_boost_factor = - parse_optional(cli_args, "builder-boost-factor")?; + config.builder_registration_timestamp_override = + validator_client_config.builder_registration_timestamp_override; + config.validator_store.builder_boost_factor = validator_client_config.builder_boost_factor; config.enable_latency_measurement_service = - !cli_args.get_flag("disable-latency-measurement-service"); + !validator_client_config.disable_latency_measurement_service; config.validator_registration_batch_size = - parse_required(cli_args, "validator-registration-batch-size")?; + validator_client_config.validator_registration_batch_size; + if config.validator_registration_batch_size == 0 { return Err("validator-registration-batch-size cannot be 0".to_string()); } config.validator_store.enable_web3signer_slashing_protection = - if cli_args.get_flag("disable-slashing-protection-web3signer") { + if validator_client_config.disable_slashing_protection_web3signer { warn!( log, "Slashing protection for remote keys disabled"; "info" => "ensure slashing protection on web3signer is enabled or you WILL \ - get slashed" + get slashed" ); false } else { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 5adfdf3349..3bad63a50b 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -1,9 +1,9 @@ -mod cli; +pub mod cli; pub mod config; mod latency; mod notifier; -pub use cli::cli_app; +use crate::cli::ValidatorClient; pub use config::Config; use initialized_validators::InitializedValidators; use metrics::set_gauge; @@ -11,11 +11,10 @@ use monitoring_api::{MonitoringHttpClient, ProcessType}; use sensitive_url::SensitiveUrl; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use account_utils::validator_definitions::ValidatorDefinitions; use beacon_node_fallback::{ start_fallback_updater_service, BeaconNodeFallback, CandidateBeaconNode, }; - -use 
account_utils::validator_definitions::ValidatorDefinitions; use clap::ArgMatches; use doppelganger_service::DoppelgangerService; use environment::RuntimeContext; @@ -97,8 +96,9 @@ impl ProductionValidatorClient { pub async fn new_from_cli( context: RuntimeContext, cli_args: &ArgMatches, + validator_client_config: &ValidatorClient, ) -> Result { - let config = Config::from_cli(cli_args, context.log()) + let config = Config::from_cli(cli_args, validator_client_config, context.log()) .map_err(|e| format!("Unable to initialize config: {}", e))?; Self::new(context, config).await } @@ -204,15 +204,15 @@ impl ProductionValidatorClient { config.initialized_validators.clone(), log.clone(), ) - .await - .map_err(|e| { - match e { - UnableToOpenVotingKeystore(err) => { - format!("Unable to initialize validators: {:?}. If you have recently moved the location of your data directory \ + .await + .map_err(|e| { + match e { + UnableToOpenVotingKeystore(err) => { + format!("Unable to initialize validators: {:?}. 
If you have recently moved the location of your data directory \ make sure to update the location of voting_keystore_path in your validator_definitions.yml", err) - }, - err => { - format!("Unable to initialize validators: {:?}", err)} + }, + err => { + format!("Unable to initialize validators: {:?}", err)} } })?; diff --git a/validator_client/validator_services/Cargo.toml b/validator_client/validator_services/Cargo.toml index 278370e79b..c3c5ff1768 100644 --- a/validator_client/validator_services/Cargo.toml +++ b/validator_client/validator_services/Cargo.toml @@ -6,7 +6,8 @@ authors = ["Sigma Prime "] [dependencies] beacon_node_fallback = { workspace = true } -bls = { workspace = true } +bls = { workspace = true } +either = { workspace = true } eth2 = { workspace = true } futures = { workspace = true } graffiti_file = { workspace = true } diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index 76bba37a81..62dfa0bfa3 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -1,5 +1,6 @@ use crate::duties_service::{DutiesService, DutyAndProof}; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; +use either::Either; use futures::future::join_all; use logging::crit; use slot_clock::SlotClock; @@ -451,7 +452,7 @@ impl AttestationService Some(a), Err(e) => { // This shouldn't happen unless BN and VC are out of sync with @@ -468,8 +469,12 @@ impl AttestationService>(); + beacon_node - .post_beacon_pool_attestations_v2(&single_attestations, fork_name) + .post_beacon_pool_attestations_v2::( + Either::Right(single_attestations), + fork_name, + ) .await } else { beacon_node diff --git a/validator_manager/src/lib.rs b/validator_manager/src/lib.rs index 8e43cd5977..9beccd3bde 100644 --- a/validator_manager/src/lib.rs +++ b/validator_manager/src/lib.rs @@ -1,5 +1,5 @@ -use clap::{Arg, 
ArgAction, ArgMatches, Command}; -use clap_utils::{get_color_style, FLAG_HEADER}; +use clap::{ArgMatches, Command}; +use clap_utils::get_color_style; use common::write_to_json_file; use environment::Environment; use serde::Serialize; @@ -46,16 +46,6 @@ pub fn cli_app() -> Command { .display_order(0) .styles(get_color_style()) .about("Utilities for managing a Lighthouse validator client via the HTTP API.") - .arg( - Arg::new("help") - .long("help") - .short('h') - .help("Prints help information") - .action(ArgAction::HelpLong) - .display_order(0) - .help_heading(FLAG_HEADER) - .global(true), - ) .subcommand(create_validators::cli_app()) .subcommand(import_validators::cli_app()) .subcommand(move_validators::cli_app()) diff --git a/wordlist.txt b/wordlist.txt index 6287366cbc..bb8b46b525 100644 --- a/wordlist.txt +++ b/wordlist.txt @@ -162,6 +162,7 @@ keypair keypairs keystore keystores +leveldb linter linux localhost @@ -191,6 +192,7 @@ pre pubkey pubkeys rc +redb reimport resync roadmap