diff --git a/Cargo.lock b/Cargo.lock index b3b4069e8c..b98e096718 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,7 +79,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -92,7 +92,7 @@ dependencies = [ "cipher 0.3.0", "cpufeatures", "ctr 0.8.0", - "opaque-debug", + "opaque-debug 0.3.1", ] [[package]] @@ -674,6 +674,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7862e21c893d65a1650125d157eaeec691439379a1cee17ee49031b79236ada4" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "auto_impl" version = "1.2.1" @@ -724,6 +736,28 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "base58" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" + +[[package]] +name = "base58check" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ee2fe4c9a0c84515f136aaae2466744a721af6d63339c18689d9e995d74d99b" +dependencies = [ + "base58", + "sha2 0.8.2", +] + +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + [[package]] name = "base64" version = "0.13.1" @@ -808,7 +842,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" dependencies = [ "account_utils", "beacon_chain", @@ -880,6 +914,12 @@ dependencies = [ "types", ] +[[package]] +name = 
"bech32" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dabbe35f96fb9507f7330793dc490461b2962659ac5d427181e451a623751d1" + [[package]] name = "bincode" version = "1.3.3" @@ -939,6 +979,16 @@ version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +[[package]] +name = "bitvec" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +dependencies = [ + "either", + "radium 0.3.0", +] + [[package]] name = "bitvec" version = "0.20.4" @@ -972,14 +1022,26 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding 0.1.5", + "byte-tools", + "byteorder", + "generic-array 0.12.4", +] + [[package]] name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding", - "generic-array", + "block-padding 0.2.1", + "generic-array 0.14.7", ] [[package]] @@ -988,7 +1050,16 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array", + "generic-array 0.14.7", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", ] [[package]] @@ -1046,7 +1117,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" 
dependencies = [ "beacon_node", "bytes", @@ -1105,6 +1176,12 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + [[package]] name = "byteorder" version = "1.5.0" @@ -1187,6 +1264,20 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.26", + "serde", + "serde_json", + "thiserror 2.0.12", +] + [[package]] name = "cast" version = "0.3.0" @@ -1296,7 +1387,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -1403,6 +1494,7 @@ dependencies = [ "monitoring_api", "network", "operation_pool", + "rand 0.8.5", "sensitive_url", "serde", "serde_json", @@ -1430,6 +1522,63 @@ dependencies = [ "cc", ] +[[package]] +name = "coins-bip32" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634c509653de24b439672164bbf56f5f582a2ab0e313d3b0f6af0b7345cf2560" +dependencies = [ + "bincode", + "bs58 0.4.0", + "coins-core", + "digest 0.10.7", + "getrandom 0.2.15", + "hmac 0.12.1", + "k256 0.11.6", + "lazy_static", + "serde", + "sha2 0.10.8", + "thiserror 1.0.69", +] + +[[package]] +name = "coins-bip39" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a11892bcac83b4c6e95ab84b5b06c76d9d70ad73548dd07418269c5c7977171" +dependencies = [ + "bitvec 0.17.4", 
+ "coins-bip32", + "getrandom 0.2.15", + "hex", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror 1.0.69", +] + +[[package]] +name = "coins-core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c94090a6663f224feae66ab01e41a2555a8296ee07b5f20dab8888bdefc9f617" +dependencies = [ + "base58check", + "base64 0.12.3", + "bech32", + "blake2", + "digest 0.10.7", + "generic-array 0.14.7", + "hex", + "ripemd", + "serde", + "serde_derive", + "sha2 0.10.8", + "sha3 0.10.8", + "thiserror 1.0.69", +] + [[package]] name = "colorchoice" version = "1.0.3" @@ -1521,6 +1670,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1654,9 +1812,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] @@ -1698,7 +1856,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ - "generic-array", + "generic-array 0.14.7", "rand_core 0.6.4", "subtle", "zeroize", @@ -1710,7 +1868,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "generic-array", + 
"generic-array 0.14.7", "rand_core 0.6.4", "subtle", "zeroize", @@ -1722,7 +1880,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array", + "generic-array 0.14.7", "rand_core 0.6.4", "typenum", ] @@ -1733,7 +1891,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array", + "generic-array 0.14.7", "subtle", ] @@ -1743,7 +1901,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ - "generic-array", + "generic-array 0.14.7", "subtle", ] @@ -1915,7 +2073,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f" dependencies = [ "data-encoding", - "syn 1.0.109", + "syn 2.0.100", ] [[package]] @@ -2038,7 +2196,7 @@ version = "0.99.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3da29a38df43d6f156149c9b43ded5e018ddff2a855cf2cfd62e8cd7d079c69f" dependencies = [ - "convert_case", + "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version 0.4.1", @@ -2086,13 +2244,22 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array 0.12.4", +] + [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -2321,8 +2488,9 @@ dependencies = [ 
"der 0.6.1", "digest 0.10.7", "ff 0.12.1", - "generic-array", + "generic-array 0.14.7", "group 0.12.1", + "pkcs8 0.9.0", "rand_core 0.6.4", "sec1 0.3.0", "subtle", @@ -2339,7 +2507,7 @@ dependencies = [ "crypto-bigint 0.5.5", "digest 0.10.7", "ff 0.13.1", - "generic-array", + "generic-array 0.14.7", "group 0.13.0", "pem-rfc7468", "pkcs8 0.10.2", @@ -2470,6 +2638,28 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "eth-keystore" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" +dependencies = [ + "aes 0.8.4", + "ctr 0.9.2", + "digest 0.10.7", + "hex", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "rand 0.8.5", + "scrypt 0.10.0", + "serde", + "serde_json", + "sha2 0.10.8", + "sha3 0.10.8", + "thiserror 1.0.69", + "uuid 0.8.2", +] + [[package]] name = "eth1" version = "0.2.0" @@ -2530,6 +2720,7 @@ dependencies = [ "multiaddr", "pretty_reqwest_error", "proto_array", + "rand 0.8.5", "reqwest", "reqwest-eventsource", "sensitive_url", @@ -2537,6 +2728,7 @@ dependencies = [ "serde_json", "slashing_protection", "ssz_types", + "test_random_derive", "tokio", "types", "zeroize", @@ -2586,7 +2778,7 @@ dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", "rand 0.8.5", - "scrypt", + "scrypt 0.7.0", "serde", "serde_json", "serde_repr", @@ -2853,15 +3045,17 @@ checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" dependencies = [ "arrayvec", "bytes", - "cargo_metadata", + "cargo_metadata 0.15.4", "chrono", + "convert_case 0.6.0", "elliptic-curve 0.12.3", "ethabi 18.0.0", - "generic-array", + "generic-array 0.14.7", "hex", "k256 0.11.6", "once_cell", "open-fastrlp", + "proc-macro2", "rand 0.8.5", "rlp", "rlp-derive", @@ -2874,6 +3068,49 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "ethers-etherscan" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a9713f525348e5dde025d09b0a4217429f8074e8ff22c886263cc191e87d8216" +dependencies = [ + "ethers-core", + "getrandom 0.2.15", + "reqwest", + "semver 1.0.26", + "serde", + "serde-aux", + "serde_json", + "thiserror 1.0.69", + "tracing", +] + +[[package]] +name = "ethers-middleware" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e71df7391b0a9a51208ffb5c7f2d068900e99d6b3128d3a4849d138f194778b7" +dependencies = [ + "async-trait", + "auto_impl 0.5.0", + "ethers-contract", + "ethers-core", + "ethers-etherscan", + "ethers-providers", + "ethers-signers", + "futures-locks", + "futures-util", + "instant", + "reqwest", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-futures", + "url", +] + [[package]] name = "ethers-providers" version = "1.0.2" @@ -2881,7 +3118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" dependencies = [ "async-trait", - "auto_impl", + "auto_impl 1.2.1", "base64 0.13.1", "ethers-core", "futures-core", @@ -2909,6 +3146,24 @@ dependencies = [ "ws_stream_wasm", ] +[[package]] +name = "ethers-signers" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f41ced186867f64773db2e55ffdd92959e094072a1d09a5e5e831d443204f98" +dependencies = [ + "async-trait", + "coins-bip32", + "coins-bip39", + "elliptic-curve 0.12.3", + "eth-keystore", + "ethers-core", + "hex", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror 1.0.69", +] + [[package]] name = "event-listener" version = "2.5.3" @@ -2954,7 +3209,9 @@ dependencies = [ "async-channel 1.9.0", "deposit_contract", "ethers-core", + "ethers-middleware", "ethers-providers", + "ethers-signers", "execution_layer", "fork_choice", "futures", @@ -3032,6 +3289,12 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fake-simd" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -3057,7 +3320,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" dependencies = [ "arrayvec", - "auto_impl", + "auto_impl 1.2.1", "bytes", ] @@ -3068,7 +3331,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" dependencies = [ "arrayvec", - "auto_impl", + "auto_impl 1.2.1", "bytes", ] @@ -3320,6 +3583,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "futures-locks" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" +dependencies = [ + "futures-channel", + "futures-task", +] + [[package]] name = "futures-macro" version = "0.3.31" @@ -3400,6 +3673,15 @@ dependencies = [ "windows 0.58.0", ] +[[package]] +name = "generic-array" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" +dependencies = [ + "typenum", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -3464,7 +3746,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ - "opaque-debug", + "opaque-debug 0.3.1", "polyval", ] @@ -3834,7 +4116,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ "digest 0.9.0", - "generic-array", + "generic-array 0.14.7", "hmac 0.8.1", ] @@ -4452,7 +4734,7 @@ version = "0.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ - "generic-array", + "generic-array 0.14.7", ] [[package]] @@ -4690,7 +4972,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" dependencies = [ "account_utils", "beacon_chain", @@ -4761,7 +5043,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -5252,7 +5534,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" dependencies = [ "account_manager", "account_utils", @@ -5458,6 +5740,7 @@ dependencies = [ "tracing-core", "tracing-log", "tracing-subscriber", + "workspace_members", ] [[package]] @@ -6168,6 +6451,12 @@ version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + [[package]] name = "opaque-debug" version = "0.3.1" @@ -6181,7 +6470,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" dependencies = [ "arrayvec", - "auto_impl", + "auto_impl 1.2.1", "bytes", "ethereum-types 0.14.1", "open-fastrlp-derive", @@ -6201,9 +6490,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.71" +version = "0.10.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd" +checksum = 
"fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" dependencies = [ "bitflags 2.9.0", "cfg-if", @@ -6242,9 +6531,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.106" +version = "0.9.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" +checksum = "8288979acd84749c744a9014b4382d42b8f7b2592847b5afb2ed29e5d16ede07" dependencies = [ "cc", "libc", @@ -6275,6 +6564,15 @@ dependencies = [ "types", ] +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + [[package]] name = "overload" version = "0.1.1" @@ -6608,7 +6906,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", - "opaque-debug", + "opaque-debug 0.3.1", "universal-hash", ] @@ -6620,7 +6918,7 @@ checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", - "opaque-debug", + "opaque-debug 0.3.1", "universal-hash", ] @@ -6718,6 +7016,30 @@ dependencies = [ "toml_edit 0.22.24", ] +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + [[package]] name = "proc-macro2" version = "1.0.94" @@ 
-6974,6 +7296,12 @@ dependencies = [ "rusqlite", ] +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" + [[package]] name = "radium" version = "0.6.2" @@ -7278,6 +7606,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "rlp" version = "0.5.2" @@ -7624,6 +7961,15 @@ dependencies = [ "cipher 0.3.0", ] +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher 0.4.4", +] + [[package]] name = "same-file" version = "1.0.6" @@ -7695,10 +8041,22 @@ checksum = "879588d8f90906e73302547e20fffefdd240eb3e0e744e142321f5d49dea0518" dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", - "salsa20", + "salsa20 0.8.1", "sha2 0.9.9", ] +[[package]] +name = "scrypt" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" +dependencies = [ + "hmac 0.12.1", + "pbkdf2 0.11.0", + "salsa20 0.10.2", + "sha2 0.10.8", +] + [[package]] name = "sct" version = "0.7.1" @@ -7717,7 +8075,7 @@ checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct 0.1.1", "der 0.6.1", - "generic-array", + "generic-array 0.14.7", "pkcs8 0.9.0", "subtle", "zeroize", @@ -7731,7 +8089,7 @@ checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", "der 0.7.9", - "generic-array", + "generic-array 0.14.7", "pkcs8 0.10.2", "subtle", "zeroize", @@ -7810,6 +8168,27 @@ 
dependencies = [ "serde_derive", ] +[[package]] +name = "serde-aux" +version = "4.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5290c39c5f6992b9dddbda28541d965dba46468294e6018a408fa297e6c602de" +dependencies = [ + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + [[package]] name = "serde_array_query" version = "0.1.0" @@ -7890,6 +8269,18 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + [[package]] name = "sha2" version = "0.9.9" @@ -7900,7 +8291,7 @@ dependencies = [ "cfg-if", "cpufeatures", "digest 0.9.0", - "opaque-debug", + "opaque-debug 0.3.1", ] [[package]] @@ -7923,7 +8314,7 @@ dependencies = [ "block-buffer 0.9.0", "digest 0.9.0", "keccak", - "opaque-debug", + "opaque-debug 0.3.1", ] [[package]] @@ -9152,6 +9543,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -10152,6 +10549,14 @@ dependencies = [ "bitflags 2.9.0", ] +[[package]] +name = "workspace_members" +version = "0.1.0" +dependencies = [ + "cargo_metadata 0.19.2", + "quote", +] + [[package]] name = "write16" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 5284713fc2..31f50068dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ members = [ "common/unused_port", 
"common/validator_dir", "common/warp_utils", + "common/workspace_members", "consensus/fixed_bytes", "consensus/fork_choice", @@ -120,6 +121,7 @@ bincode = "1" bitvec = "1" byteorder = "1" bytes = "1" +cargo_metadata = "0.19" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. @@ -139,6 +141,8 @@ ethereum_ssz = "0.8.2" ethereum_ssz_derive = "0.8.2" ethers-core = "1" ethers-providers = { version = "1", default-features = false } +ethers-signers = { version = "1", default-features = false } +ethers-middleware = { version = "1", default-features = false } exit-future = "0.2" fnv = "1" fs2 = "0.4" @@ -246,6 +250,7 @@ kzg = { path = "crypto/kzg" } metrics = { path = "common/metrics" } lighthouse_network = { path = "beacon_node/lighthouse_network" } lighthouse_version = { path = "common/lighthouse_version" } +workspace_members = { path = "common/workspace_members" } lockfile = { path = "common/lockfile" } logging = { path = "common/logging" } lru_cache = { path = "common/lru_cache" } diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index cf963535c7..30d6846964 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" authors = [ "Paul Hauner ", "Age Manning ( num_of_blobs: usize, spec: &ChainSpec, -) -> (SignedBeaconBlock, BlobsList) { +) -> (SignedBeaconBlock, BlobsList, KzgProofs) { let mut block = BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)); let mut body = block.body_mut(); let blob_kzg_commitments = body.blob_kzg_commitments_mut().unwrap(); @@ -27,8 +27,9 @@ fn create_test_block_and_blobs( .map(|_| Blob::::default()) .collect::>() .into(); + let proofs = vec![KzgProof::empty(); num_of_blobs * spec.number_of_columns as usize].into(); - (signed_block, blobs) + (signed_block, blobs, proofs) } fn 
all_benches(c: &mut Criterion) { @@ -37,10 +38,11 @@ fn all_benches(c: &mut Criterion) { let kzg = get_kzg(&spec); for blob_count in [1, 2, 3, 6] { - let (signed_block, blobs) = create_test_block_and_blobs::(blob_count, &spec); + let (signed_block, blobs, proofs) = create_test_block_and_blobs::(blob_count, &spec); let column_sidecars = blobs_to_data_column_sidecars( &blobs.iter().collect::>(), + proofs.to_vec(), &signed_block, &kzg, &spec, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9692441aba..64ef5ef17e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -31,9 +31,9 @@ use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; +use crate::fetch_blobs::EngineGetBlobsOutput; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; use crate::graffiti_calculator::GraffitiCalculator; -use crate::head_tracker::{HeadTracker, HeadTrackerReader, SszHeadTracker}; use crate::kzg_utils::reconstruct_blobs; use crate::light_client_finality_update_verification::{ Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate, @@ -57,7 +57,7 @@ use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; -use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; +use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; use 
crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; @@ -92,6 +92,7 @@ use operation_pool::{ }; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; use proto_array::{DoNotReOrg, ProposerHeadError}; +use rand::RngCore; use safe_arith::SafeArith; use slasher::Slasher; use slot_clock::SlotClock; @@ -122,7 +123,6 @@ use store::{ KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; -use tokio::sync::oneshot; use tokio_stream::Stream; use tracing::{debug, error, info, trace, warn}; use tree_hash::TreeHash; @@ -454,8 +454,6 @@ pub struct BeaconChain { /// A handler for events generated by the beacon chain. This is only initialized when the /// HTTP server is enabled. pub event_handler: Option>, - /// Used to track the heads of the beacon chain. - pub(crate) head_tracker: Arc, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: RwLock, /// A cache of eth1 deposit data at epoch boundaries for deposit finalization @@ -494,6 +492,8 @@ pub struct BeaconChain { pub data_availability_checker: Arc>, /// The KZG trusted setup used by this chain. pub kzg: Arc, + /// RNG instance used by the chain. Currently used for shuffling column sidecars in block publishing. + pub rng: Arc>>, } pub enum BeaconBlockResponseWrapper { @@ -607,57 +607,13 @@ impl BeaconChain { }) } - /// Persists the head tracker and fork choice. + /// Return a database operation for writing the `PersistedBeaconChain` to disk. /// - /// We do it atomically even though no guarantees need to be made about blocks from - /// the head tracker also being present in fork choice. - pub fn persist_head_and_fork_choice(&self) -> Result<(), Error> { - let mut batch = vec![]; - - let _head_timer = metrics::start_timer(&metrics::PERSIST_HEAD); - - // Hold a lock to head_tracker until it has been persisted to disk. 
Otherwise there's a race - // condition with the pruning thread which can result in a block present in the head tracker - // but absent in the DB. This inconsistency halts pruning and dramastically increases disk - // size. Ref: https://github.com/sigp/lighthouse/issues/4773 - let head_tracker = self.head_tracker.0.read(); - batch.push(self.persist_head_in_batch(&head_tracker)); - - let _fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE); - batch.push(self.persist_fork_choice_in_batch()); - - self.store.hot_db.do_atomically(batch)?; - drop(head_tracker); - - Ok(()) - } - - /// Return a `PersistedBeaconChain` without reference to a `BeaconChain`. - pub fn make_persisted_head( - genesis_block_root: Hash256, - head_tracker_reader: &HeadTrackerReader, - ) -> PersistedBeaconChain { - PersistedBeaconChain { - _canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT, - genesis_block_root, - ssz_head_tracker: SszHeadTracker::from_map(head_tracker_reader), - } - } - - /// Return a database operation for writing the beacon chain head to disk. - pub fn persist_head_in_batch( - &self, - head_tracker_reader: &HeadTrackerReader, - ) -> KeyValueStoreOp { - Self::persist_head_in_batch_standalone(self.genesis_block_root, head_tracker_reader) - } - - pub fn persist_head_in_batch_standalone( - genesis_block_root: Hash256, - head_tracker_reader: &HeadTrackerReader, - ) -> KeyValueStoreOp { - Self::make_persisted_head(genesis_block_root, head_tracker_reader) - .as_kv_store_op(BEACON_CHAIN_DB_KEY) + /// These days the `PersistedBeaconChain` is only used to store the genesis block root, so it + /// should only ever be written once at startup. It used to be written more frequently, but + /// this is no longer necessary. + pub fn persist_head_in_batch_standalone(genesis_block_root: Hash256) -> KeyValueStoreOp { + PersistedBeaconChain { genesis_block_root }.as_kv_store_op(BEACON_CHAIN_DB_KEY) } /// Load fork choice from disk, returning `None` if it isn't found. 
@@ -738,7 +694,7 @@ impl BeaconChain { /// /// - `slot` always increases by `1`. /// - Skipped slots contain the root of the closest prior - /// non-skipped slot (identical to the way they are stored in `state.block_roots`). + /// non-skipped slot (identical to the way they are stored in `state.block_roots`). /// - Iterator returns `(Hash256, Slot)`. /// /// Will return a `BlockOutOfRange` error if the requested start slot is before the period of @@ -802,7 +758,7 @@ impl BeaconChain { /// /// - `slot` always decreases by `1`. /// - Skipped slots contain the root of the closest prior - /// non-skipped slot (identical to the way they are stored in `state.block_roots`) . + /// non-skipped slot (identical to the way they are stored in `state.block_roots`) . /// - Iterator returns `(Hash256, Slot)`. /// - The provided `block_root` is included as the first item in the iterator. pub fn rev_iter_block_roots_from( @@ -831,7 +787,7 @@ impl BeaconChain { /// - `slot` always decreases by `1`. /// - Iterator returns `(Hash256, Slot)`. /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot - /// returned may be earlier than the wall-clock slot. + /// returned may be earlier than the wall-clock slot. pub fn rev_iter_state_roots_from<'a>( &'a self, state_root: Hash256, @@ -1450,12 +1406,13 @@ impl BeaconChain { /// /// Returns `(block_root, block_slot)`. pub fn heads(&self) -> Vec<(Hash256, Slot)> { - self.head_tracker.heads() - } - - /// Only used in tests. - pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool { - self.head_tracker.contains_head((*block_hash).into()) + self.canonical_head + .fork_choice_read_lock() + .proto_array() + .heads_descended_from_finalization::() + .iter() + .map(|node| (node.root, node.slot)) + .collect() } /// Returns the `BeaconState` at the given slot. 
@@ -1735,8 +1692,6 @@ impl BeaconChain { let notif = ManualFinalizationNotification { state_root: state_root.into(), checkpoint, - head_tracker: self.head_tracker.clone(), - genesis_block_root: self.genesis_block_root, }; self.store_migrator.process_manual_finalization(notif); @@ -3185,16 +3140,11 @@ impl BeaconChain { } /// Process blobs retrieved from the EL and returns the `AvailabilityProcessingStatus`. - /// - /// `data_column_recv`: An optional receiver for `DataColumnSidecarList`. - /// If PeerDAS is enabled, this receiver will be provided and used to send - /// the `DataColumnSidecar`s once they have been successfully computed. pub async fn process_engine_blobs( self: &Arc, slot: Slot, block_root: Hash256, - blobs: FixedBlobSidecarList, - data_column_recv: Option>>, + engine_get_blobs_output: EngineGetBlobsOutput, ) -> Result { // If this block has already been imported to forkchoice it must have been available, so // we don't need to process its blobs again. @@ -3208,15 +3158,12 @@ impl BeaconChain { // process_engine_blobs is called for both pre and post PeerDAS. However, post PeerDAS // consumers don't expect the blobs event to fire erratically. 
- if !self - .spec - .is_peer_das_enabled_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())) - { + if let EngineGetBlobsOutput::Blobs(blobs) = &engine_get_blobs_output { self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); } let r = self - .check_engine_blob_availability_and_import(slot, block_root, blobs, data_column_recv) + .check_engine_blobs_availability_and_import(slot, block_root, engine_get_blobs_output) .await; self.remove_notified(&block_root, r) } @@ -3666,20 +3613,24 @@ impl BeaconChain { .await } - async fn check_engine_blob_availability_and_import( + async fn check_engine_blobs_availability_and_import( self: &Arc, slot: Slot, block_root: Hash256, - blobs: FixedBlobSidecarList, - data_column_recv: Option>>, + engine_get_blobs_output: EngineGetBlobsOutput, ) -> Result { - self.check_blobs_for_slashability(block_root, &blobs)?; - let availability = self.data_availability_checker.put_engine_blobs( - block_root, - slot.epoch(T::EthSpec::slots_per_epoch()), - blobs, - data_column_recv, - )?; + let availability = match engine_get_blobs_output { + EngineGetBlobsOutput::Blobs(blobs) => { + self.check_blobs_for_slashability(block_root, &blobs)?; + self.data_availability_checker + .put_engine_blobs(block_root, blobs)? + } + EngineGetBlobsOutput::CustodyColumns(data_columns) => { + self.check_columns_for_slashability(block_root, &data_columns)?; + self.data_availability_checker + .put_engine_data_columns(block_root, data_columns)? + } + }; self.process_availability(slot, availability, || Ok(())) .await @@ -3693,27 +3644,7 @@ impl BeaconChain { block_root: Hash256, custody_columns: DataColumnSidecarList, ) -> Result { - // Need to scope this to ensure the lock is dropped before calling `process_availability` - // Even an explicit drop is not enough to convince the borrow checker. 
- { - let mut slashable_cache = self.observed_slashable.write(); - // Assumes all items in custody_columns are for the same block_root - if let Some(column) = custody_columns.first() { - let header = &column.signed_block_header; - if verify_header_signature::(self, header).is_ok() { - slashable_cache - .observe_slashable( - header.message.slot, - header.message.proposer_index, - block_root, - ) - .map_err(|e| BlockError::BeaconChainError(e.into()))?; - if let Some(slasher) = self.slasher.as_ref() { - slasher.accept_block_header(header.clone()); - } - } - } - } + self.check_columns_for_slashability(block_root, &custody_columns)?; // This slot value is purely informative for the consumers of // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. @@ -3725,6 +3656,31 @@ impl BeaconChain { .await } + fn check_columns_for_slashability( + self: &Arc, + block_root: Hash256, + custody_columns: &DataColumnSidecarList, + ) -> Result<(), BlockError> { + let mut slashable_cache = self.observed_slashable.write(); + // Assumes all items in custody_columns are for the same block_root + if let Some(column) = custody_columns.first() { + let header = &column.signed_block_header; + if verify_header_signature::(self, header).is_ok() { + slashable_cache + .observe_slashable( + header.message.slot, + header.message.proposer_index, + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(header.clone()); + } + } + } + Ok(()) + } + /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` /// /// An error is returned if the block was unable to be imported. 
It may be partially imported @@ -3762,7 +3718,6 @@ impl BeaconChain { state, parent_block, parent_eth1_finalization_data, - confirmed_state_roots, consensus_context, } = import_data; @@ -3786,7 +3741,6 @@ impl BeaconChain { block, block_root, state, - confirmed_state_roots, payload_verification_outcome.payload_verification_status, parent_block, parent_eth1_finalization_data, @@ -3824,7 +3778,6 @@ impl BeaconChain { signed_block: AvailableBlock, block_root: Hash256, mut state: BeaconState, - confirmed_state_roots: Vec, payload_verification_status: PayloadVerificationStatus, parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, @@ -4012,11 +3965,6 @@ impl BeaconChain { let block = signed_block.message(); let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); - ops.extend( - confirmed_state_roots - .into_iter() - .map(StoreOp::DeleteStateTemporaryFlag), - ); ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); @@ -4043,9 +3991,6 @@ impl BeaconChain { // about it. let block_time_imported = timestamp_now(); - let parent_root = block.parent_root(); - let slot = block.slot(); - let current_eth1_finalization_data = Eth1FinalizationData { eth1_data: state.eth1_data().clone(), eth1_deposit_index: state.eth1_deposit_index(), @@ -4062,13 +4007,10 @@ impl BeaconChain { &mut state, ) .unwrap_or_else(|e| { - error!("error caching light_client data {:?}", e); + debug!("error caching light_client data {:?}", e); }); } - self.head_tracker - .register_block(block_root, parent_root, slot); - metrics::stop_timer(db_write_timer); metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); @@ -5860,15 +5802,26 @@ impl BeaconChain { let kzg_proofs = Vec::from(proofs); let kzg = self.kzg.as_ref(); - - // TODO(fulu): we no longer need blob proofs from PeerDAS and could avoid computing. 
- kzg_utils::validate_blobs::( - kzg, - expected_kzg_commitments, - blobs.iter().collect(), - &kzg_proofs, - ) - .map_err(BlockProductionError::KzgError)?; + if self + .spec + .is_peer_das_enabled_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())) + { + kzg_utils::validate_blobs_and_cell_proofs::( + kzg, + blobs.iter().collect(), + &kzg_proofs, + expected_kzg_commitments, + ) + .map_err(BlockProductionError::KzgError)?; + } else { + kzg_utils::validate_blobs::( + kzg, + expected_kzg_commitments, + blobs.iter().collect(), + &kzg_proofs, + ) + .map_err(BlockProductionError::KzgError)?; + } Some((kzg_proofs.into(), blobs)) } @@ -7180,35 +7133,39 @@ impl BeaconChain { ); Ok(Some(StoreOp::PutDataColumns(block_root, data_columns))) } - AvailableBlockData::DataColumnsRecv(data_column_recv) => { - // Blobs were available from the EL, in this case we wait for the data columns to be computed (blocking). - let _column_recv_timer = - metrics::start_timer(&metrics::BLOCK_PROCESSING_DATA_COLUMNS_WAIT); - // Unable to receive data columns from sender, sender is either dropped or - // failed to compute data columns from blobs. We restore fork choice here and - // return to avoid inconsistency in database. - let computed_data_columns = data_column_recv - .blocking_recv() - .map_err(|e| format!("Did not receive data columns from sender: {e:?}"))?; - debug!( - %block_root, - count = computed_data_columns.len(), - "Writing data columns to store" - ); - // TODO(das): Store only this node's custody columns - Ok(Some(StoreOp::PutDataColumns( - block_root, - computed_data_columns, - ))) + } + } + + /// Retrieves block roots (in ascending slot order) within some slot range from fork choice. 
+ pub fn block_roots_from_fork_choice(&self, start_slot: u64, count: u64) -> Vec { + let head_block_root = self.canonical_head.cached_head().head_block_root(); + let fork_choice_read_lock = self.canonical_head.fork_choice_read_lock(); + let block_roots_iter = fork_choice_read_lock + .proto_array() + .iter_block_roots(&head_block_root); + let end_slot = start_slot.saturating_add(count); + let mut roots = vec![]; + + for (root, slot) in block_roots_iter { + if slot < end_slot && slot >= start_slot { + roots.push(root); + } + if slot < start_slot { + break; } } + + drop(fork_choice_read_lock); + // return in ascending slot order + roots.reverse(); + roots } } impl Drop for BeaconChain { fn drop(&mut self) { let drop = || -> Result<(), Error> { - self.persist_head_and_fork_choice()?; + self.persist_fork_choice()?; self.persist_op_pool()?; self.persist_eth1_cache() }; diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index d10bbfbbc5..567433caee 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -178,7 +178,7 @@ pub fn compute_proposer_duties_from_head( /// - Returns an error if `state.current_epoch() > target_epoch`. /// - No-op if `state.current_epoch() == target_epoch`. /// - It must be the case that `state.canonical_root() == state_root`, but this function will not -/// check that. +/// check that. pub fn ensure_state_is_in_epoch( state: &mut BeaconState, state_root: Hash256, diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 70d653524b..074ae93a79 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -5,7 +5,7 @@ //! - Verification for gossip blocks (i.e., should we gossip some block from the network). //! 
- Verification for normal blocks (e.g., some block received on the RPC during a parent lookup). //! - Verification for chain segments (e.g., some chain of blocks received on the RPC during a -//! sync). +//! sync). //! //! The primary source of complexity here is that we wish to avoid doing duplicate work as a block //! moves through the verification process. For example, if some block is verified for gossip, we @@ -97,8 +97,8 @@ use tracing::{debug, error}; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, - Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, + Hash256, InconsistentFork, KzgProofs, PublicKey, PublicKeyBytes, RelativeEpoch, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; pub const POS_PANDA_BANNER: &str = r#" @@ -755,6 +755,7 @@ pub fn build_blob_data_column_sidecars( chain: &BeaconChain, block: &SignedBeaconBlock>, blobs: BlobsList, + kzg_cell_proofs: KzgProofs, ) -> Result, DataColumnSidecarError> { // Only attempt to build data columns if blobs is non empty to avoid skewing the metrics. 
if blobs.is_empty() { @@ -766,8 +767,14 @@ pub fn build_blob_data_column_sidecars( &[&blobs.len().to_string()], ); let blob_refs = blobs.iter().collect::>(); - let sidecars = blobs_to_data_column_sidecars(&blob_refs, block, &chain.kzg, &chain.spec) - .discard_timer_on_break(&mut timer)?; + let sidecars = blobs_to_data_column_sidecars( + &blob_refs, + kzg_cell_proofs.to_vec(), + block, + &chain.kzg, + &chain.spec, + ) + .discard_timer_on_break(&mut timer)?; drop(timer); Ok(sidecars) } @@ -1260,40 +1267,6 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc } } -impl IntoExecutionPendingBlock for Arc> { - /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` - /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. - fn into_execution_pending_block_slashable( - self, - block_root: Hash256, - chain: &Arc>, - notify_execution_layer: NotifyExecutionLayer, - ) -> Result, BlockSlashInfo> { - // Perform an early check to prevent wasting time on irrelevant blocks. - let block_root = check_block_relevancy(&self, block_root, chain) - .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; - let maybe_available = chain - .data_availability_checker - .verify_kzg_for_rpc_block(RpcBlock::new_without_blobs(Some(block_root), self.clone())) - .map_err(|e| { - BlockSlashInfo::SignatureNotChecked( - self.signed_block_header(), - BlockError::AvailabilityCheck(e), - ) - })?; - SignatureVerifiedBlock::check_slashable(maybe_available, block_root, chain)? 
- .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) - } - - fn block(&self) -> &SignedBeaconBlock { - self - } - - fn block_cloned(&self) -> Arc> { - self.clone() - } -} - impl IntoExecutionPendingBlock for RpcBlock { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. @@ -1453,22 +1426,8 @@ impl ExecutionPendingBlock { let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); - // Stage a batch of operations to be completed atomically if this block is imported - // successfully. If there is a skipped slot, we include the state root of the pre-state, - // which may be an advanced state that was stored in the DB with a `temporary` flag. let mut state = parent.pre_state; - let mut confirmed_state_roots = - if block.slot() > state.slot() && state.slot() > parent.beacon_block.slot() { - // Advanced pre-state. Delete its temporary flag. - let pre_state_root = state.update_tree_hash_cache()?; - vec![pre_state_root] - } else { - // Pre state is either unadvanced, or should not be stored long-term because there - // is no skipped slot between `parent` and `block`. - vec![] - }; - // The block must have a higher slot than its parent. if block.slot() <= parent.beacon_block.slot() { return Err(BlockError::BlockIsNotLaterThanParent { @@ -1515,38 +1474,29 @@ impl ExecutionPendingBlock { // processing, but we get early access to it. let state_root = state.update_tree_hash_cache()?; - // Store the state immediately, marking it as temporary, and staging the deletion - // of its temporary status as part of the larger atomic operation. + // Store the state immediately. 
let txn_lock = chain.store.hot_db.begin_rw_transaction(); let state_already_exists = chain.store.load_hot_state_summary(&state_root)?.is_some(); let state_batch = if state_already_exists { - // If the state exists, it could be temporary or permanent, but in neither case - // should we rewrite it or store a new temporary flag for it. We *will* stage - // the temporary flag for deletion because it's OK to double-delete the flag, - // and we don't mind if another thread gets there first. + // If the state exists, we do not need to re-write it. vec![] } else { - vec![ - if state.slot() % T::EthSpec::slots_per_epoch() == 0 { - StoreOp::PutState(state_root, &state) - } else { - StoreOp::PutStateSummary( - state_root, - HotStateSummary::new(&state_root, &state)?, - ) - }, - StoreOp::PutStateTemporaryFlag(state_root), - ] + vec![if state.slot() % T::EthSpec::slots_per_epoch() == 0 { + StoreOp::PutState(state_root, &state) + } else { + StoreOp::PutStateSummary( + state_root, + HotStateSummary::new(&state_root, &state)?, + ) + }] }; chain .store .do_atomically_with_block_and_blobs_cache(state_batch)?; drop(txn_lock); - confirmed_state_roots.push(state_root); - state_root }; @@ -1713,7 +1663,6 @@ impl ExecutionPendingBlock { state, parent_block: parent.beacon_block, parent_eth1_finalization_data, - confirmed_state_roots, consensus_context, }, payload_verification_handle, diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index d3a6e93862..dab54dc823 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -103,14 +103,14 @@ impl RpcBlock { pub fn new_without_blobs( block_root: Option, block: Arc>, + custody_columns_count: usize, ) -> Self { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); Self { block_root, block: RpcBlockInner::Block(block), - // Block has zero columns - custody_columns_count: 0, + 
custody_columns_count, } } @@ -358,7 +358,6 @@ pub struct BlockImportData { pub state: BeaconState, pub parent_block: SignedBeaconBlock>, pub parent_eth1_finalization_data: Eth1FinalizationData, - pub confirmed_state_roots: Vec, pub consensus_context: ConsensusContext, } @@ -376,7 +375,6 @@ impl BlockImportData { eth1_data: <_>::default(), eth1_deposit_index: 0, }, - confirmed_state_roots: vec![], consensus_context: ConsensusContext::new(Slot::new(0)), } } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index f6d18c3705..812dcbeda7 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -8,8 +8,7 @@ use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; -use crate::head_tracker::HeadTracker; -use crate::kzg_utils::blobs_to_data_column_sidecars; +use crate::kzg_utils::build_data_column_sidecars; use crate::light_client_server_cache::LightClientServerCache; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::observed_data_sidecars::ObservedDataSidecars; @@ -31,6 +30,8 @@ use logging::crit; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{Mutex, RwLock}; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; +use rand::RngCore; +use rayon::prelude::*; use slasher::Slasher; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::{per_slot_processing, AllCaches}; @@ -41,8 +42,8 @@ use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use tracing::{debug, error, info}; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, + 
BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, DataColumnSidecarList, Epoch, + EthSpec, FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -93,7 +94,6 @@ pub struct BeaconChainBuilder { slot_clock: Option, shutdown_sender: Option>, light_client_server_tx: Option>>, - head_tracker: Option, validator_pubkey_cache: Option>, spec: Arc, chain_config: ChainConfig, @@ -106,6 +106,7 @@ pub struct BeaconChainBuilder { task_executor: Option, validator_monitor_config: Option, import_all_data_columns: bool, + rng: Option>, } impl @@ -136,7 +137,6 @@ where slot_clock: None, shutdown_sender: None, light_client_server_tx: None, - head_tracker: None, validator_pubkey_cache: None, spec: Arc::new(E::default_spec()), chain_config: ChainConfig::default(), @@ -147,6 +147,7 @@ where task_executor: None, validator_monitor_config: None, import_all_data_columns: false, + rng: None, } } @@ -314,10 +315,6 @@ where self.genesis_block_root = Some(chain.genesis_block_root); self.genesis_state_root = Some(genesis_block.state_root()); - self.head_tracker = Some( - HeadTracker::from_ssz_container(&chain.ssz_head_tracker) - .map_err(|e| format!("Failed to decode head tracker for database: {:?}", e))?, - ); self.validator_pubkey_cache = Some(pubkey_cache); self.fork_choice = Some(fork_choice); @@ -553,15 +550,8 @@ where { // After PeerDAS recompute columns from blobs to not force the checkpointz server // into exposing another route. 
- let blobs = blobs - .iter() - .map(|blob_sidecar| &blob_sidecar.blob) - .collect::>(); let data_columns = - blobs_to_data_column_sidecars(&blobs, &weak_subj_block, &self.kzg, &self.spec) - .map_err(|e| { - format!("Failed to compute weak subjectivity data_columns: {e:?}") - })?; + build_data_columns_from_blobs(&weak_subj_block, &blobs, &self.kzg, &self.spec)?; // TODO(das): only persist the columns under custody store .put_data_columns(&weak_subj_block_root, data_columns) @@ -704,6 +694,14 @@ where self } + /// Sets the `rng` field. + /// + /// Currently used for shuffling column sidecars in block publishing. + pub fn rng(mut self, rng: Box) -> Self { + self.rng = Some(rng); + self + } + /// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied. /// /// An error will be returned at runtime if all required parameters have not been configured. @@ -729,7 +727,7 @@ where .genesis_state_root .ok_or("Cannot build without a genesis state root")?; let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); - let head_tracker = Arc::new(self.head_tracker.unwrap_or_default()); + let rng = self.rng.ok_or("Cannot build without an RNG")?; let beacon_proposer_cache: Arc> = <_>::default(); let mut validator_monitor = @@ -769,8 +767,6 @@ where &self.spec, )?; - // Update head tracker. 
- head_tracker.register_block(block_root, block.parent_root(), block.slot()); (block_root, block, true) } Err(e) => return Err(descriptive_db_error("head block", &e)), @@ -846,8 +842,7 @@ where })?; let migrator_config = self.store_migrator_config.unwrap_or_default(); - let store_migrator = - BackgroundMigrator::new(store.clone(), migrator_config, genesis_block_root); + let store_migrator = BackgroundMigrator::new(store.clone(), migrator_config); if let Some(slot) = slot_clock.now() { validator_monitor.process_valid_state( @@ -872,11 +867,10 @@ where // // This *must* be stored before constructing the `BeaconChain`, so that its `Drop` instance // doesn't write a `PersistedBeaconChain` without the rest of the batch. - let head_tracker_reader = head_tracker.0.read(); self.pending_io_batch.push(BeaconChain::< Witness, >::persist_head_in_batch_standalone( - genesis_block_root, &head_tracker_reader + genesis_block_root )); self.pending_io_batch.push(BeaconChain::< Witness, @@ -887,7 +881,6 @@ where .hot_db .do_atomically(self.pending_io_batch) .map_err(|e| format!("Error writing chain & metadata to disk: {:?}", e))?; - drop(head_tracker_reader); let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); let genesis_time = head_snapshot.beacon_state.genesis_time(); @@ -968,7 +961,6 @@ where fork_choice_signal_tx, fork_choice_signal_rx, event_handler: self.event_handler, - head_tracker, shuffling_cache: RwLock::new(ShufflingCache::new( shuffling_cache_size, head_shuffling_ids, @@ -999,6 +991,7 @@ where .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), kzg: self.kzg.clone(), + rng: Arc::new(Mutex::new(rng)), }; let head = beacon_chain.head_snapshot(); @@ -1152,6 +1145,49 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { ) } +/// Build data columns and proofs from blobs. 
+fn build_data_columns_from_blobs( + block: &SignedBeaconBlock, + blobs: &BlobSidecarList, + kzg: &Kzg, + spec: &ChainSpec, +) -> Result, String> { + let blob_cells_and_proofs_vec = blobs + .into_par_iter() + .map(|blob_sidecar| { + let kzg_blob_ref = blob_sidecar + .blob + .as_ref() + .try_into() + .map_err(|e| format!("Failed to convert blob to kzg blob: {e:?}"))?; + let cells_and_proofs = kzg + .compute_cells_and_proofs(kzg_blob_ref) + .map_err(|e| format!("Failed to compute cell kzg proofs: {e:?}"))?; + Ok(cells_and_proofs) + }) + .collect::, String>>()?; + + let data_columns = { + let beacon_block_body = block.message().body(); + let kzg_commitments = beacon_block_body + .blob_kzg_commitments() + .cloned() + .map_err(|e| format!("Unexpected pre Deneb block: {e:?}"))?; + let kzg_commitments_inclusion_proof = beacon_block_body + .kzg_commitments_merkle_proof() + .map_err(|e| format!("Failed to compute kzg commitments merkle proof: {e:?}"))?; + build_data_column_sidecars( + kzg_commitments, + kzg_commitments_inclusion_proof, + block.signed_block_header(), + blob_cells_and_proofs_vec, + spec, + ) + .map_err(|e| format!("Failed to compute weak subjectivity data_columns: {e:?}"))? 
+ }; + Ok(data_columns) +} + #[cfg(not(debug_assertions))] #[cfg(test)] mod test { @@ -1161,6 +1197,8 @@ mod test { use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, }; + use rand::rngs::StdRng; + use rand::SeedableRng; use ssz::Encode; use std::time::Duration; use store::config::StoreConfig; @@ -1207,6 +1245,7 @@ mod test { .testing_slot_clock(Duration::from_secs(1)) .expect("should configure testing slot clock") .shutdown_sender(shutdown_tx) + .rng(Box::new(StdRng::seed_from_u64(42))) .build() .expect("should build"); diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index d99c6038d3..a6f5179fdc 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -53,7 +53,7 @@ use slot_clock::SlotClock; use state_processing::AllCaches; use std::sync::Arc; use std::time::Duration; -use store::{iter::StateRootsIterator, KeyValueStoreOp, StoreItem}; +use store::{iter::StateRootsIterator, KeyValueStore, KeyValueStoreOp, StoreItem}; use task_executor::{JoinHandle, ShutdownReason}; use tracing::{debug, error, info, warn}; use types::*; @@ -840,7 +840,7 @@ impl BeaconChain { ); if is_epoch_transition || reorg_distance.is_some() { - self.persist_head_and_fork_choice()?; + self.persist_fork_choice()?; self.op_pool.prune_attestations(self.epoch()?); } @@ -983,7 +983,6 @@ impl BeaconChain { self.store_migrator.process_finalization( new_finalized_state_root.into(), new_view.finalized_checkpoint, - self.head_tracker.clone(), )?; // Prune blobs in the background. @@ -998,6 +997,14 @@ impl BeaconChain { Ok(()) } + /// Persist fork choice to disk, writing immediately. 
+ pub fn persist_fork_choice(&self) -> Result<(), Error> { + let _fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE); + let batch = vec![self.persist_fork_choice_in_batch()]; + self.store.hot_db.do_atomically(batch)?; + Ok(()) + } + /// Return a database operation for writing fork choice to disk. pub fn persist_fork_choice_in_batch(&self) -> KeyValueStoreOp { Self::persist_fork_choice_in_batch_standalone(&self.canonical_head.fork_choice_read_lock()) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 2b7ae9e4d1..033b472da0 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -14,7 +14,6 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; -use tokio::sync::oneshot; use tracing::{debug, error, info_span, Instrument}; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ @@ -226,27 +225,45 @@ impl DataAvailabilityChecker { pub fn put_engine_blobs( &self, block_root: Hash256, - block_epoch: Epoch, blobs: FixedBlobSidecarList, - data_columns_recv: Option>>, ) -> Result, AvailabilityCheckError> { - // `data_columns_recv` is always Some if block_root is post-PeerDAS - if let Some(data_columns_recv) = data_columns_recv { - self.availability_cache.put_computed_data_columns_recv( - block_root, - block_epoch, - data_columns_recv, - ) - } else { - let seen_timestamp = self - .slot_clock - .now_duration() - .ok_or(AvailabilityCheckError::SlotClockError)?; - self.availability_cache.put_kzg_verified_blobs( - block_root, - KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp), - ) - } + let seen_timestamp = self + .slot_clock + .now_duration() + .ok_or(AvailabilityCheckError::SlotClockError)?; + self.availability_cache.put_kzg_verified_blobs( + block_root, + 
KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp), + ) + } + + /// Put a list of data columns computed from blobs received from the EL pool into the + /// availability cache. + /// + /// This DOES NOT perform KZG proof and inclusion proof verification because + /// - The KZG proofs should have been verified by the trusted EL. + /// - The KZG commitments inclusion proof should have been constructed immediately prior to + /// calling this function so they are assumed to be valid. + /// + /// This method is used if the EL already has the blobs and returns them via the `getBlobsV2` + /// engine method. + /// More details in [fetch_blobs.rs](https://github.com/sigp/lighthouse/blob/44f8add41ea2252769bb967864af95b3c13af8ca/beacon_node/beacon_chain/src/fetch_blobs.rs). + pub fn put_engine_data_columns( + &self, + block_root: Hash256, + data_columns: DataColumnSidecarList, + ) -> Result, AvailabilityCheckError> { + let kzg_verified_custody_columns = data_columns + .into_iter() + .map(|d| { + KzgVerifiedCustodyDataColumn::from_asserted_custody( + KzgVerifiedDataColumn::from_verified(d), + ) + }) + .collect::>(); + + self.availability_cache + .put_kzg_verified_data_columns(block_root, kzg_verified_custody_columns) } /// Check if we've cached other blobs for this block. If it completes a set and we also @@ -704,9 +721,6 @@ pub enum AvailableBlockData { Blobs(BlobSidecarList), /// Block is post-PeerDAS and has more than zero blobs DataColumns(DataColumnSidecarList), - /// Block is post-PeerDAS, has more than zero blobs and we recomputed the columns from the EL's - /// mempool blobs - DataColumnsRecv(oneshot::Receiver>), } /// A fully available block that is ready to be imported into fork choice. @@ -756,7 +770,6 @@ impl AvailableBlock { AvailableBlockData::NoData => false, AvailableBlockData::Blobs(..) 
=> true, AvailableBlockData::DataColumns(_) => false, - AvailableBlockData::DataColumnsRecv(_) => false, } } @@ -782,9 +795,6 @@ impl AvailableBlock { AvailableBlockData::DataColumns(data_columns) => { AvailableBlockData::DataColumns(data_columns.clone()) } - AvailableBlockData::DataColumnsRecv(_) => { - return Err("Can't clone DataColumnsRecv".to_owned()) - } }, blobs_available_timestamp: self.blobs_available_timestamp, spec: self.spec.clone(), diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index f38a3b8b9c..f5fd24483a 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -13,13 +13,11 @@ use parking_lot::RwLock; use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; -use tokio::sync::oneshot; use tracing::debug; use types::blob_sidecar::BlobIdentifier; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, + Hash256, RuntimeFixedVector, RuntimeVariableList, SignedBeaconBlock, }; /// This represents the components of a partially available block @@ -32,12 +30,6 @@ pub struct PendingComponents { pub verified_data_columns: Vec>, pub executed_block: Option>, pub reconstruction_started: bool, - /// Receiver for data columns that are computed asynchronously; - /// - /// If `data_column_recv` is `Some`, it means data column computation or reconstruction has been - /// started. This can happen either via engine blobs fetching or data column reconstruction - /// (triggered when >= 50% columns are received via gossip). 
- pub data_column_recv: Option>>, } impl PendingComponents { @@ -202,13 +194,8 @@ impl PendingComponents { Some(AvailableBlockData::DataColumns(data_columns)) } Ordering::Less => { - // The data_columns_recv is an infallible promise that we will receive all expected - // columns, so we consider the block available. - // We take the receiver as it can't be cloned, and make_available should never - // be called again once it returns `Some`. - self.data_column_recv - .take() - .map(AvailableBlockData::DataColumnsRecv) + // Not enough data columns received yet + None } } } else { @@ -261,7 +248,6 @@ impl PendingComponents { .max(), // TODO(das): To be fixed with https://github.com/sigp/lighthouse/pull/6850 AvailableBlockData::DataColumns(_) => None, - AvailableBlockData::DataColumnsRecv(_) => None, }; let AvailabilityPendingExecutedBlock { @@ -293,7 +279,6 @@ impl PendingComponents { verified_data_columns: vec![], executed_block: None, reconstruction_started: false, - data_column_recv: None, } } @@ -331,17 +316,11 @@ impl PendingComponents { } else { "?" }; - let data_column_recv_count = if self.data_column_recv.is_some() { - 1 - } else { - 0 - }; format!( - "block {} data_columns {}/{} data_columns_recv {}", + "block {} data_columns {}/{}", block_count, self.verified_data_columns.len(), custody_columns_count, - data_column_recv_count, ) } else { let num_expected_blobs = if let Some(block) = self.get_cached_block() { @@ -498,7 +477,6 @@ impl DataAvailabilityCheckerInner { self.state_cache.recover_pending_executed_block(block) })? { // We keep the pending components in the availability cache during block import (#5845). - // `data_column_recv` is returned as part of the available block and is no longer needed here. write_lock.put(block_root, pending_components); drop(write_lock); Ok(Availability::Available(Box::new(available_block))) @@ -551,55 +529,6 @@ impl DataAvailabilityCheckerInner { self.state_cache.recover_pending_executed_block(block) })? 
{ // We keep the pending components in the availability cache during block import (#5845). - // `data_column_recv` is returned as part of the available block and is no longer needed here. - write_lock.put(block_root, pending_components); - drop(write_lock); - Ok(Availability::Available(Box::new(available_block))) - } else { - write_lock.put(block_root, pending_components); - Ok(Availability::MissingComponents(block_root)) - } - } - - /// The `data_column_recv` parameter is a `Receiver` for data columns that are computed - /// asynchronously. This method is used if the EL already has the blobs and returns them via the - /// `getBlobsV1` engine method. More details in [fetch_blobs.rs](https://github.com/sigp/lighthouse/blob/44f8add41ea2252769bb967864af95b3c13af8ca/beacon_node/beacon_chain/src/fetch_blobs.rs). - pub fn put_computed_data_columns_recv( - &self, - block_root: Hash256, - block_epoch: Epoch, - data_column_recv: oneshot::Receiver>, - ) -> Result, AvailabilityCheckError> { - let mut write_lock = self.critical.write(); - - // Grab existing entry or create a new entry. - let mut pending_components = write_lock - .pop_entry(&block_root) - .map(|(_, v)| v) - .unwrap_or_else(|| { - PendingComponents::empty( - block_root, - self.spec.max_blobs_per_block(block_epoch) as usize, - ) - }); - - // We have all the blobs from engine, and have started computing data columns. We store the - // receiver in `PendingComponents` for later use when importing the block. - // TODO(das): Error or log if we overwrite a prior receiver https://github.com/sigp/lighthouse/issues/6764 - pending_components.data_column_recv = Some(data_column_recv); - - debug!( - component = "data_columns_recv", - ?block_root, - status = pending_components.status_str(block_epoch, &self.spec), - "Component added to data availability checker" - ); - - if let Some(available_block) = pending_components.make_available(&self.spec, |block| { - self.state_cache.recover_pending_executed_block(block) - })? 
{ - // We keep the pending components in the availability cache during block import (#5845). - // `data_column_recv` is returned as part of the available block and is no longer needed here. write_lock.put(block_root, pending_components); drop(write_lock); Ok(Availability::Available(Box::new(available_block))) @@ -694,7 +623,6 @@ impl DataAvailabilityCheckerInner { self.state_cache.recover_pending_executed_block(block) })? { // We keep the pending components in the availability cache during block import (#5845). - // `data_column_recv` is returned as part of the available block and is no longer needed here. write_lock.put(block_root, pending_components); drop(write_lock); Ok(Availability::Available(Box::new(available_block))) @@ -920,7 +848,6 @@ mod test { state, parent_block, parent_eth1_finalization_data, - confirmed_state_roots: vec![], consensus_context, }; @@ -1305,7 +1232,6 @@ mod pending_components_tests { eth1_data: Default::default(), eth1_deposit_index: 0, }, - confirmed_state_roots: vec![], consensus_context: ConsensusContext::new(Slot::new(0)), }, payload_verification_outcome: PayloadVerificationOutcome { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 09d0563a4a..5fe674f30c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -7,26 +7,21 @@ use crate::{ }; use lru::LruCache; use parking_lot::RwLock; -use ssz_derive::{Decode, Encode}; use state_processing::BlockReplayer; use std::sync::Arc; use store::OnDiskConsensusContext; use types::beacon_block_body::KzgCommitments; -use types::{ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc}; use types::{BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; /// This mirrors everything in the `AvailabilityPendingExecutedBlock`, 
except /// that it is much smaller because it contains only a state root instead of /// a full `BeaconState`. -#[derive(Encode, Decode, Clone)] +#[derive(Clone)] pub struct DietAvailabilityPendingExecutedBlock { - #[ssz(with = "ssz_tagged_signed_beacon_block_arc")] block: Arc>, state_root: Hash256, - #[ssz(with = "ssz_tagged_signed_beacon_block")] parent_block: SignedBeaconBlock>, parent_eth1_finalization_data: Eth1FinalizationData, - confirmed_state_roots: Vec, consensus_context: OnDiskConsensusContext, payload_verification_outcome: PayloadVerificationOutcome, custody_columns_count: usize, @@ -108,7 +103,6 @@ impl StateLRUCache { state_root, parent_block: executed_block.import_data.parent_block, parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, - confirmed_state_roots: executed_block.import_data.confirmed_state_roots, consensus_context: OnDiskConsensusContext::from_consensus_context( executed_block.import_data.consensus_context, ), @@ -138,7 +132,6 @@ impl StateLRUCache { state, parent_block: diet_executed_block.parent_block, parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data, - confirmed_state_roots: diet_executed_block.confirmed_state_roots, consensus_context: diet_executed_block .consensus_context .into_consensus_context(), @@ -227,7 +220,6 @@ impl From> state_root: value.import_data.state.canonical_root().unwrap(), parent_block: value.import_data.parent_block, parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, - confirmed_state_roots: value.import_data.confirmed_state_roots, consensus_context: OnDiskConsensusContext::from_consensus_context( value.import_data.consensus_context, ), diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 2f95d834b5..57efbb0a77 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ 
b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -141,13 +141,23 @@ pub enum GossipDataColumnError { /// /// The column sidecar is invalid and the peer is faulty UnexpectedDataColumn, - /// The data column length must be equal to the number of commitments/proofs, otherwise the + /// The data column length must be equal to the number of commitments, otherwise the /// sidecar is invalid. /// /// ## Peer scoring /// /// The column sidecar is invalid and the peer is faulty - InconsistentCommitmentsOrProofLength, + InconsistentCommitmentsLength { + cells_len: usize, + commitments_len: usize, + }, + /// The data column length must be equal to the number of proofs, otherwise the + /// sidecar is invalid. + /// + /// ## Peer scoring + /// + /// The column sidecar is invalid and the peer is faulty + InconsistentProofsLength { cells_len: usize, proofs_len: usize }, } impl From for GossipDataColumnError { @@ -240,6 +250,14 @@ impl KzgVerifiedDataColumn { verify_kzg_for_data_column(data_column, kzg) } + /// Create a `KzgVerifiedDataColumn` from `data_column` that are already KZG verified. + /// + /// This should be used with caution, as used incorrectly it could result in KZG verification + /// being skipped and invalid data_columns being deemed valid. 
+ pub fn from_verified(data_column: Arc>) -> Self { + Self { data: data_column } + } + pub fn from_batch( data_columns: Vec>>, kzg: &Kzg, @@ -473,10 +491,23 @@ fn verify_data_column_sidecar( if data_column.kzg_commitments.is_empty() { return Err(GossipDataColumnError::UnexpectedDataColumn); } - if data_column.column.len() != data_column.kzg_commitments.len() - || data_column.column.len() != data_column.kzg_proofs.len() - { - return Err(GossipDataColumnError::InconsistentCommitmentsOrProofLength); + + let cells_len = data_column.column.len(); + let commitments_len = data_column.kzg_commitments.len(); + let proofs_len = data_column.kzg_proofs.len(); + + if cells_len != commitments_len { + return Err(GossipDataColumnError::InconsistentCommitmentsLength { + cells_len, + commitments_len, + }); + } + + if cells_len != proofs_len { + return Err(GossipDataColumnError::InconsistentProofsLength { + cells_len, + proofs_len, + }); } Ok(()) diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index a90911026c..5665ef3775 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -33,7 +33,7 @@ pub struct CacheItem { /// /// - Produce an attestation without using `chain.canonical_head`. /// - Verify that a block root exists (i.e., will be imported in the future) during attestation -/// verification. +/// verification. /// - Provide a block which can be sent to peers via RPC. #[derive(Default)] pub struct EarlyAttesterCache { @@ -74,10 +74,6 @@ impl EarlyAttesterCache { AvailableBlockData::NoData => (None, None), AvailableBlockData::Blobs(blobs) => (Some(blobs.clone()), None), AvailableBlockData::DataColumns(data_columns) => (None, Some(data_columns.clone())), - // TODO(das): Once the columns are received, they will not be available in - // the early attester cache. If someone does a query to us via RPC we - // will get downscored. 
- AvailableBlockData::DataColumnsRecv(_) => (None, None), }; let item = CacheItem { diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 43429b726c..8a79bff4c7 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -362,6 +362,12 @@ pub struct DummyEth1ChainBackend(PhantomData); impl Eth1ChainBackend for DummyEth1ChainBackend { /// Produce some deterministic junk based upon the current epoch. fn eth1_data(&self, state: &BeaconState, _spec: &ChainSpec) -> Result { + // [New in Electra:EIP6110] + if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { + if state.eth1_deposit_index() == deposit_requests_start_index { + return Ok(state.eth1_data().clone()); + } + } let current_epoch = state.current_epoch(); let slots_per_voting_period = E::slots_per_eth1_voting_period() as u64; let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; @@ -456,6 +462,12 @@ impl CachingEth1Backend { impl Eth1ChainBackend for CachingEth1Backend { fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { + // [New in Electra:EIP6110] + if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { + if state.eth1_deposit_index() == deposit_requests_start_index { + return Ok(state.eth1_data().clone()); + } + } let period = E::SlotsPerEth1VotingPeriod::to_u64(); let voting_period_start_slot = (state.slot() / period) * period; let voting_period_start_seconds = slot_start_seconds( diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs index 84618ceab0..0b9d19e156 100644 --- a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -461,7 +461,7 @@ pub mod tests { let last_finalized_eth1 = eth1s_by_count .range(0..(finalized_deposits + 1)) .map(|(_, eth1)| eth1) - .last() + 
.next_back() .cloned(); assert_eq!( eth1cache.finalize(finalized_checkpoint), diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index ceb563ffc2..3b576da1c7 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -7,34 +7,52 @@ //! on P2P gossip to the network. From PeerDAS onwards, together with the increase in blob count, //! broadcasting blobs requires a much higher bandwidth, and is only done by high capacity //! supernodes. + use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::kzg_utils::blobs_to_data_column_sidecars; use crate::observed_data_sidecars::DoNotObserve; -use crate::{metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError}; -use execution_layer::json_structures::BlobAndProofV1; +use crate::{ + metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, + BlockError, +}; +use execution_layer::json_structures::{BlobAndProofV1, BlobAndProofV2}; use execution_layer::Error as ExecutionLayerError; -use metrics::{inc_counter, inc_counter_by, TryExt}; +use metrics::{inc_counter, TryExt}; use ssz_types::FixedVector; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; +use std::collections::HashSet; use std::sync::Arc; -use tokio::sync::oneshot; -use tracing::{debug, error}; +use tracing::debug; use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; +use types::data_column_sidecar::DataColumnSidecarError; use types::{ - BeaconStateError, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnSidecarList, EthSpec, - FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, + BeaconStateError, Blob, BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecarList, EthSpec, + FullPayload, Hash256, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, VersionedHash, }; +/// Blobs or data column to be published to the gossip network. 
pub enum BlobsOrDataColumns { Blobs(Vec>), DataColumns(DataColumnSidecarList), } +/// Result from engine get blobs to be passed onto `DataAvailabilityChecker`. +/// +/// The blobs are retrieved from a trusted EL and columns are computed locally, therefore they are +/// considered valid without requiring extra validation. +pub enum EngineGetBlobsOutput { + Blobs(FixedBlobSidecarList), + /// A filtered list of custody data columns to be imported into the `DataAvailabilityChecker`. + CustodyColumns(DataColumnSidecarList), +} + #[derive(Debug)] pub enum FetchEngineBlobError { BeaconStateError(BeaconStateError), + BeaconChainError(BeaconChainError), BlobProcessingError(BlockError), BlobSidecarError(BlobSidecarError), + DataColumnSidecarError(DataColumnSidecarError), ExecutionLayerMissing, InternalError(String), GossipBlob(GossipBlobError), @@ -48,6 +66,7 @@ pub async fn fetch_and_process_engine_blobs( chain: Arc>, block_root: Hash256, block: Arc>>, + custody_columns: HashSet, publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, ) -> Result, FetchEngineBlobError> { let versioned_hashes = if let Some(kzg_commitments) = block @@ -66,20 +85,53 @@ pub async fn fetch_and_process_engine_blobs( return Ok(None); }; - let num_expected_blobs = versioned_hashes.len(); + debug!( + num_expected_blobs = versioned_hashes.len(), + "Fetching blobs from the EL" + ); + if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + fetch_and_process_blobs_v2( + chain, + block_root, + block, + versioned_hashes, + custody_columns, + publish_fn, + ) + .await + } else { + fetch_and_process_blobs_v1(chain, block_root, block, versioned_hashes, publish_fn).await + } +} + +async fn fetch_and_process_blobs_v1( + chain: Arc>, + block_root: Hash256, + block: Arc>, + versioned_hashes: Vec, + publish_fn: impl Fn(BlobsOrDataColumns) + Send + Sized, +) -> Result, FetchEngineBlobError> { + let num_expected_blobs = versioned_hashes.len(); let execution_layer = chain .execution_layer .as_ref() 
.ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; + metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64); debug!(num_expected_blobs, "Fetching blobs from the EL"); let response = execution_layer - .get_blobs(versioned_hashes) + .get_blobs_v1(versioned_hashes) .await + .inspect_err(|_| { + inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); + }) .map_err(FetchEngineBlobError::RequestFailed)?; - if response.is_empty() || response.iter().all(|opt| opt.is_none()) { + let num_fetched_blobs = response.iter().filter(|opt| opt.is_some()).count(); + metrics::observe(&metrics::BLOBS_FROM_EL_RECEIVED, num_fetched_blobs as f64); + + if num_fetched_blobs == 0 { debug!(num_expected_blobs, "No blobs fetched from the EL"); inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); return Ok(None); @@ -99,20 +151,6 @@ pub async fn fetch_and_process_engine_blobs( &chain.spec, )?; - let num_fetched_blobs = fixed_blob_sidecar_list - .iter() - .filter(|b| b.is_some()) - .count(); - - inc_counter_by( - &metrics::BLOBS_FROM_EL_EXPECTED_TOTAL, - num_expected_blobs as u64, - ); - inc_counter_by( - &metrics::BLOBS_FROM_EL_RECEIVED_TOTAL, - num_fetched_blobs as u64, - ); - // Gossip verify blobs before publishing. This prevents blobs with invalid KZG proofs from // the EL making it into the data availability checker. We do not immediately add these // blobs to the observed blobs/columns cache because we want to allow blobs/columns to arrive on gossip @@ -132,59 +170,9 @@ pub async fn fetch_and_process_engine_blobs( .collect::, _>>() .map_err(FetchEngineBlobError::GossipBlob)?; - let peer_das_enabled = chain.spec.is_peer_das_enabled_for_epoch(block.epoch()); - - let data_columns_receiver_opt = if peer_das_enabled { - // Partial blobs response isn't useful for PeerDAS, so we don't bother building and publishing data columns. 
- if num_fetched_blobs != num_expected_blobs { - debug!( - info = "Unable to compute data columns", - num_fetched_blobs, num_expected_blobs, "Not all blobs fetched from the EL" - ); - return Ok(None); - } - - if chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&block_root) - { - // Avoid computing columns if block has already been imported. - debug!( - info = "block has already been imported", - "Ignoring EL blobs response" - ); - return Ok(None); - } - - if chain - .canonical_head - .fork_choice_read_lock() - .contains_block(&block_root) - { - // Avoid computing columns if block has already been imported. - debug!( - info = "block has already been imported", - "Ignoring EL blobs response" - ); - return Ok(None); - } - - let data_columns_receiver = spawn_compute_and_publish_data_columns_task( - &chain, - block.clone(), - fixed_blob_sidecar_list.clone(), - publish_fn, - ); - - Some(data_columns_receiver) - } else { - if !blobs_to_import_and_publish.is_empty() { - publish_fn(BlobsOrDataColumns::Blobs(blobs_to_import_and_publish)); - } - - None - }; + if !blobs_to_import_and_publish.is_empty() { + publish_fn(BlobsOrDataColumns::Blobs(blobs_to_import_and_publish)); + } debug!(num_fetched_blobs, "Processing engine blobs"); @@ -192,8 +180,7 @@ pub async fn fetch_and_process_engine_blobs( .process_engine_blobs( block.slot(), block_root, - fixed_blob_sidecar_list.clone(), - data_columns_receiver_opt, + EngineGetBlobsOutput::Blobs(fixed_blob_sidecar_list.clone()), ) .await .map_err(FetchEngineBlobError::BlobProcessingError)?; @@ -201,67 +188,140 @@ pub async fn fetch_and_process_engine_blobs( Ok(Some(availability_processing_status)) } -/// Spawn a blocking task here for long computation tasks, so it doesn't block processing, and it -/// allows blobs / data columns to propagate without waiting for processing. 
-/// -/// An `mpsc::Sender` is then used to send the produced data columns to the `beacon_chain` for it -/// to be persisted, **after** the block is made attestable. -/// -/// The reason for doing this is to make the block available and attestable as soon as possible, -/// while maintaining the invariant that block and data columns are persisted atomically. -fn spawn_compute_and_publish_data_columns_task( +async fn fetch_and_process_blobs_v2( + chain: Arc>, + block_root: Hash256, + block: Arc>, + versioned_hashes: Vec, + custody_columns_indices: HashSet, + publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, +) -> Result, FetchEngineBlobError> { + let num_expected_blobs = versioned_hashes.len(); + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(FetchEngineBlobError::ExecutionLayerMissing)?; + + metrics::observe(&metrics::BLOBS_FROM_EL_EXPECTED, num_expected_blobs as f64); + debug!(num_expected_blobs, "Fetching blobs from the EL"); + let response = execution_layer + .get_blobs_v2(versioned_hashes) + .await + .inspect_err(|_| { + inc_counter(&metrics::BLOBS_FROM_EL_ERROR_TOTAL); + }) + .map_err(FetchEngineBlobError::RequestFailed)?; + + let (blobs, proofs): (Vec<_>, Vec<_>) = response + .into_iter() + .filter_map(|blob_and_proof_opt| { + blob_and_proof_opt.map(|blob_and_proof| { + let BlobAndProofV2 { blob, proofs } = blob_and_proof; + (blob, proofs) + }) + }) + .unzip(); + + let num_fetched_blobs = blobs.len(); + metrics::observe(&metrics::BLOBS_FROM_EL_RECEIVED, num_fetched_blobs as f64); + + // Partial blobs response isn't useful for PeerDAS, so we don't bother building and publishing data columns. 
+ if num_fetched_blobs != num_expected_blobs { + debug!( + info = "Unable to compute data columns", + num_fetched_blobs, num_expected_blobs, "Not all blobs fetched from the EL" + ); + inc_counter(&metrics::BLOBS_FROM_EL_MISS_TOTAL); + return Ok(None); + } else { + inc_counter(&metrics::BLOBS_FROM_EL_HIT_TOTAL); + } + + if chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + // Avoid computing columns if block has already been imported. + debug!( + info = "block has already been imported", + "Ignoring EL blobs response" + ); + return Ok(None); + } + + let custody_columns = compute_and_publish_data_columns( + &chain, + block.clone(), + blobs, + proofs, + custody_columns_indices, + publish_fn, + ) + .await?; + + debug!(num_fetched_blobs, "Processing engine blobs"); + + let availability_processing_status = chain + .process_engine_blobs( + block.slot(), + block_root, + EngineGetBlobsOutput::CustodyColumns(custody_columns), + ) + .await + .map_err(FetchEngineBlobError::BlobProcessingError)?; + + Ok(Some(availability_processing_status)) +} + +/// Offload the data column computation to a blocking task to avoid holding up the async runtime. 
+async fn compute_and_publish_data_columns( chain: &Arc>, block: Arc>>, - blobs: FixedBlobSidecarList, + blobs: Vec>, + proofs: Vec>, + custody_columns_indices: HashSet, publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, -) -> oneshot::Receiver>>> { +) -> Result, FetchEngineBlobError> { let chain_cloned = chain.clone(); - let (data_columns_sender, data_columns_receiver) = oneshot::channel(); + chain + .spawn_blocking_handle( + move || { + let mut timer = metrics::start_timer_vec( + &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, + &[&blobs.len().to_string()], + ); - chain.task_executor.spawn_blocking( - move || { - let mut timer = metrics::start_timer_vec( - &metrics::DATA_COLUMN_SIDECAR_COMPUTATION, - &[&blobs.len().to_string()], - ); - let blob_refs = blobs - .iter() - .filter_map(|b| b.as_ref().map(|b| &b.blob)) - .collect::>(); - let data_columns_result = blobs_to_data_column_sidecars( - &blob_refs, - &block, - &chain_cloned.kzg, - &chain_cloned.spec, - ) - .discard_timer_on_break(&mut timer); - drop(timer); + let blob_refs = blobs.iter().collect::>(); + let cell_proofs = proofs.into_iter().flatten().collect(); + let data_columns_result = blobs_to_data_column_sidecars( + &blob_refs, + cell_proofs, + &block, + &chain_cloned.kzg, + &chain_cloned.spec, + ) + .discard_timer_on_break(&mut timer); + drop(timer); - let all_data_columns = match data_columns_result { - Ok(d) => d, - Err(e) => { - error!( - error = ?e, - "Failed to build data column sidecars from blobs" - ); - return; - } - }; + // This filtering ensures we only import and publish the custody columns. + // `DataAvailabilityChecker` requires a strict match on custody columns count to + // consider a block available. 
+ let custody_columns = data_columns_result + .map(|mut data_columns| { + data_columns.retain(|col| custody_columns_indices.contains(&col.index)); + data_columns + }) + .map_err(FetchEngineBlobError::DataColumnSidecarError)?; - if data_columns_sender.send(all_data_columns.clone()).is_err() { - // Data column receiver have been dropped - block may have already been imported. - // This race condition exists because gossip columns may arrive and trigger block - // import during the computation. Here we just drop the computed columns. - debug!("Failed to send computed data columns"); - return; - }; - - publish_fn(BlobsOrDataColumns::DataColumns(all_data_columns)); - }, - "compute_and_publish_data_columns", - ); - - data_columns_receiver + publish_fn(BlobsOrDataColumns::DataColumns(custody_columns.clone())); + Ok(custody_columns) + }, + "compute_and_publish_data_columns", + ) + .await + .map_err(FetchEngineBlobError::BeaconChainError) + .and_then(|r| r) } fn build_blob_sidecars( diff --git a/beacon_node/beacon_chain/src/fulu_readiness.rs b/beacon_node/beacon_chain/src/fulu_readiness.rs index 872fe58f2b..1107acad74 100644 --- a/beacon_node/beacon_chain/src/fulu_readiness.rs +++ b/beacon_node/beacon_chain/src/fulu_readiness.rs @@ -1,7 +1,7 @@ //! Provides tools for checking if a node is ready for the Fulu upgrade. 
use crate::{BeaconChain, BeaconChainTypes}; -use execution_layer::http::{ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V4}; +use execution_layer::http::{ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V4}; use serde::{Deserialize, Serialize}; use std::fmt; use std::time::Duration; @@ -87,12 +87,12 @@ impl BeaconChain { Ok(capabilities) => { let mut missing_methods = String::from("Required Methods Unsupported:"); let mut all_good = true; - // TODO(fulu) switch to v5 when the EL is ready - if !capabilities.get_payload_v4 { + if !capabilities.get_payload_v5 { missing_methods.push(' '); - missing_methods.push_str(ENGINE_GET_PAYLOAD_V4); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V5); all_good = false; } + // TODO(fulu) switch to v5 when the EL is ready if !capabilities.new_payload_v4 { missing_methods.push(' '); missing_methods.push_str(ENGINE_NEW_PAYLOAD_V4); diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs deleted file mode 100644 index 9c06ef33a1..0000000000 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ /dev/null @@ -1,214 +0,0 @@ -use parking_lot::{RwLock, RwLockReadGuard}; -use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; -use types::{Hash256, Slot}; - -#[derive(Debug, PartialEq)] -pub enum Error { - MismatchingLengths { roots_len: usize, slots_len: usize }, -} - -/// Maintains a list of `BeaconChain` head block roots and slots. -/// -/// Each time a new block is imported, it should be applied to the `Self::register_block` function. -/// In order for this struct to be effective, every single block that is imported must be -/// registered here. -#[derive(Default, Debug)] -pub struct HeadTracker(pub RwLock>); - -pub type HeadTrackerReader<'a> = RwLockReadGuard<'a, HashMap>; - -impl HeadTracker { - /// Register a block with `Self`, so it may or may not be included in a `Self::heads` call. 
- /// - /// This function assumes that no block is imported without its parent having already been - /// imported. It cannot detect an error if this is not the case, it is the responsibility of - /// the upstream user. - pub fn register_block(&self, block_root: Hash256, parent_root: Hash256, slot: Slot) { - let mut map = self.0.write(); - map.remove(&parent_root); - map.insert(block_root, slot); - } - - /// Returns true iff `block_root` is a recognized head. - pub fn contains_head(&self, block_root: Hash256) -> bool { - self.0.read().contains_key(&block_root) - } - - /// Returns the list of heads in the chain. - pub fn heads(&self) -> Vec<(Hash256, Slot)> { - self.0 - .read() - .iter() - .map(|(root, slot)| (*root, *slot)) - .collect() - } - - /// Returns a `SszHeadTracker`, which contains all necessary information to restore the state - /// of `Self` at some later point. - /// - /// Should ONLY be used for tests, due to the potential for database races. - /// - /// See - #[cfg(test)] - pub fn to_ssz_container(&self) -> SszHeadTracker { - SszHeadTracker::from_map(&self.0.read()) - } - - /// Creates a new `Self` from the given `SszHeadTracker`, restoring `Self` to the same state of - /// the `Self` that created the `SszHeadTracker`. - pub fn from_ssz_container(ssz_container: &SszHeadTracker) -> Result { - let roots_len = ssz_container.roots.len(); - let slots_len = ssz_container.slots.len(); - - if roots_len != slots_len { - Err(Error::MismatchingLengths { - roots_len, - slots_len, - }) - } else { - let map = ssz_container - .roots - .iter() - .zip(ssz_container.slots.iter()) - .map(|(root, slot)| (*root, *slot)) - .collect::>(); - - Ok(Self(RwLock::new(map))) - } - } -} - -impl PartialEq for HeadTracker { - fn eq(&self, other: &HeadTracker) -> bool { - *self.0.read() == *other.0.read() - } -} - -/// Helper struct that is used to encode/decode the state of the `HeadTracker` as SSZ bytes. -/// -/// This is used when persisting the state of the `BeaconChain` to disk. 
-#[derive(Encode, Decode, Clone)] -pub struct SszHeadTracker { - roots: Vec, - slots: Vec, -} - -impl SszHeadTracker { - pub fn from_map(map: &HashMap) -> Self { - let (roots, slots) = map.iter().map(|(hash, slot)| (*hash, *slot)).unzip(); - SszHeadTracker { roots, slots } - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::{Decode, Encode}; - use types::{BeaconBlock, EthSpec, FixedBytesExtended, MainnetEthSpec}; - - type E = MainnetEthSpec; - - #[test] - fn block_add() { - let spec = &E::default_spec(); - - let head_tracker = HeadTracker::default(); - - for i in 0..16 { - let mut block: BeaconBlock = BeaconBlock::empty(spec); - let block_root = Hash256::from_low_u64_be(i); - - *block.slot_mut() = Slot::new(i); - *block.parent_root_mut() = if i == 0 { - Hash256::random() - } else { - Hash256::from_low_u64_be(i - 1) - }; - - head_tracker.register_block(block_root, block.parent_root(), block.slot()); - } - - assert_eq!( - head_tracker.heads(), - vec![(Hash256::from_low_u64_be(15), Slot::new(15))], - "should only have one head" - ); - - let mut block: BeaconBlock = BeaconBlock::empty(spec); - let block_root = Hash256::from_low_u64_be(42); - *block.slot_mut() = Slot::new(15); - *block.parent_root_mut() = Hash256::from_low_u64_be(14); - head_tracker.register_block(block_root, block.parent_root(), block.slot()); - - let heads = head_tracker.heads(); - - assert_eq!(heads.len(), 2, "should only have two heads"); - assert!( - heads - .iter() - .any(|(root, slot)| *root == Hash256::from_low_u64_be(15) && *slot == Slot::new(15)), - "should contain first head" - ); - assert!( - heads - .iter() - .any(|(root, slot)| *root == Hash256::from_low_u64_be(42) && *slot == Slot::new(15)), - "should contain second head" - ); - } - - #[test] - fn empty_round_trip() { - let non_empty = HeadTracker::default(); - for i in 0..16 { - non_empty.0.write().insert(Hash256::random(), Slot::new(i)); - } - let bytes = non_empty.to_ssz_container().as_ssz_bytes(); - - assert_eq!( - 
HeadTracker::from_ssz_container( - &SszHeadTracker::from_ssz_bytes(&bytes).expect("should decode") - ), - Ok(non_empty), - "non_empty should pass round trip" - ); - } - - #[test] - fn non_empty_round_trip() { - let non_empty = HeadTracker::default(); - for i in 0..16 { - non_empty.0.write().insert(Hash256::random(), Slot::new(i)); - } - let bytes = non_empty.to_ssz_container().as_ssz_bytes(); - - assert_eq!( - HeadTracker::from_ssz_container( - &SszHeadTracker::from_ssz_bytes(&bytes).expect("should decode") - ), - Ok(non_empty), - "non_empty should pass round trip" - ); - } - - #[test] - fn bad_length() { - let container = SszHeadTracker { - roots: vec![Hash256::random()], - slots: vec![], - }; - let bytes = container.as_ssz_bytes(); - - assert_eq!( - HeadTracker::from_ssz_container( - &SszHeadTracker::from_ssz_bytes(&bytes).expect("should decode") - ), - Err(Error::MismatchingLengths { - roots_len: 1, - slots_len: 0 - }), - "should fail decoding with bad lengths" - ); - } -} diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index ee51964910..348e6d52a6 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -132,7 +132,7 @@ impl BeaconChain { AvailableBlockData::Blobs(..) 
=> { new_oldest_blob_slot = Some(block.slot()); } - AvailableBlockData::DataColumns(_) | AvailableBlockData::DataColumnsRecv(_) => { + AvailableBlockData::DataColumns(_) => { new_oldest_data_column_slot = Some(block.slot()); } } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 06cce14144..eaaa23130d 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -1,14 +1,15 @@ use kzg::{ - Blob as KzgBlob, Bytes48, CellRef as KzgCellRef, CellsAndKzgProofs, Error as KzgError, Kzg, + Blob as KzgBlob, Bytes48, Cell as KzgCell, CellRef as KzgCellRef, CellsAndKzgProofs, + Error as KzgError, Kzg, CELLS_PER_EXT_BLOB, }; use rayon::prelude::*; -use ssz_types::FixedVector; +use ssz_types::{FixedVector, VariableList}; use std::sync::Arc; use types::beacon_block_body::KzgCommitments; use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; use types::{ Blob, BlobSidecar, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecar, - DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, + DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof, SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlindedBeaconBlock, }; @@ -43,6 +44,33 @@ pub fn validate_blob( kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) } +/// Validates a list of blobs along with their corresponding KZG commitments and +/// cell proofs for the extended blobs. 
+pub fn validate_blobs_and_cell_proofs( + kzg: &Kzg, + blobs: Vec<&Blob>, + cell_proofs: &[KzgProof], + kzg_commitments: &KzgCommitments, +) -> Result<(), KzgError> { + let cells = compute_cells::(&blobs, kzg)?; + let cell_refs = cells.iter().map(|cell| cell.as_ref()).collect::>(); + let cell_indices = (0..blobs.len()) + .flat_map(|_| 0..CELLS_PER_EXT_BLOB as u64) + .collect::>(); + + let proofs = cell_proofs + .iter() + .map(|&proof| Bytes48::from(proof)) + .collect::>(); + + let commitments = kzg_commitments + .iter() + .flat_map(|&commitment| std::iter::repeat_n(Bytes48::from(commitment), CELLS_PER_EXT_BLOB)) + .collect::>(); + + kzg.verify_cell_proof_batch(&cell_refs, &proofs, cell_indices, &commitments) +} + /// Validate a batch of `DataColumnSidecar`. pub fn validate_data_columns<'a, E: EthSpec, I>( kzg: &Kzg, @@ -148,6 +176,7 @@ pub fn verify_kzg_proof( /// Build data column sidecars from a signed beacon block and its blobs. pub fn blobs_to_data_column_sidecars( blobs: &[&Blob], + cell_proofs: Vec, block: &SignedBeaconBlock, kzg: &Kzg, spec: &ChainSpec, @@ -164,15 +193,28 @@ pub fn blobs_to_data_column_sidecars( let kzg_commitments_inclusion_proof = block.message().body().kzg_commitments_merkle_proof()?; let signed_block_header = block.signed_block_header(); + let proof_chunks = cell_proofs + .chunks_exact(spec.number_of_columns as usize) + .collect::>(); + // NOTE: assumes blob sidecars are ordered by index let blob_cells_and_proofs_vec = blobs .into_par_iter() - .map(|blob| { + .zip(proof_chunks.into_par_iter()) + .map(|(blob, proofs)| { let blob = blob .as_ref() .try_into() .expect("blob should have a guaranteed size due to FixedVector"); - kzg.compute_cells_and_proofs(blob) + + kzg.compute_cells(blob).map(|cells| { + ( + cells, + proofs + .try_into() + .expect("proof chunks should have exactly `number_of_columns` proofs"), + ) + }) }) .collect::, KzgError>>()?; @@ -186,6 +228,23 @@ pub fn blobs_to_data_column_sidecars( 
.map_err(DataColumnSidecarError::BuildSidecarFailed) } +pub fn compute_cells(blobs: &[&Blob], kzg: &Kzg) -> Result, KzgError> { + let cells_vec = blobs + .into_par_iter() + .map(|blob| { + let blob = blob + .as_ref() + .try_into() + .expect("blob should have a guaranteed size due to FixedVector"); + + kzg.compute_cells(blob) + }) + .collect::, KzgError>>()?; + + let cells_flattened: Vec = cells_vec.into_iter().flatten().collect(); + Ok(cells_flattened) +} + pub(crate) fn build_data_column_sidecars( kzg_commitments: KzgCommitments, kzg_commitments_inclusion_proof: FixedVector, @@ -236,7 +295,7 @@ pub(crate) fn build_data_column_sidecars( index: index as u64, column: DataColumn::::from(col), kzg_commitments: kzg_commitments.clone(), - kzg_proofs: KzgProofs::::from(proofs), + kzg_proofs: VariableList::from(proofs), signed_block_header: signed_block_header.clone(), kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), }) @@ -300,12 +359,7 @@ pub fn reconstruct_blobs( .collect(); let blob = Blob::::new(blob_bytes).map_err(|e| format!("{e:?}"))?; - let kzg_commitment = first_data_column - .kzg_commitments - .get(row_index) - .ok_or(format!("Missing KZG commitment for blob {row_index}"))?; - let kzg_proof = compute_blob_kzg_proof::(kzg, &blob, *kzg_commitment) - .map_err(|e| format!("{e:?}"))?; + let kzg_proof = KzgProof::empty(); BlobSidecar::::new_with_existing_proof( row_index, @@ -373,14 +427,15 @@ pub fn reconstruct_data_columns( mod test { use crate::kzg_utils::{ blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, + validate_blobs_and_cell_proofs, }; use bls::Signature; use eth2::types::BlobsBundle; use execution_layer::test_utils::generate_blobs; use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup}; use types::{ - beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, BlobsList, ChainSpec, - EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + beacon_block_body::KzgCommitments, 
BeaconBlock, BeaconBlockFulu, BlobsList, ChainSpec, + EmptyBlock, EthSpec, ForkName, FullPayload, KzgProofs, MainnetEthSpec, SignedBeaconBlock, }; type E = MainnetEthSpec; @@ -389,32 +444,52 @@ mod test { // only load it once. #[test] fn test_build_data_columns_sidecars() { - let spec = E::default_spec(); + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); let kzg = get_kzg(); test_build_data_columns_empty(&kzg, &spec); test_build_data_columns(&kzg, &spec); test_reconstruct_data_columns(&kzg, &spec); test_reconstruct_blobs_from_data_columns(&kzg, &spec); + test_verify_blob_and_cell_proofs(&kzg); + } + + #[track_caller] + fn test_verify_blob_and_cell_proofs(kzg: &Kzg) { + let (blobs_bundle, _) = generate_blobs::(3, ForkName::Fulu).unwrap(); + let BlobsBundle { + blobs, + commitments, + proofs, + } = blobs_bundle; + + let result = + validate_blobs_and_cell_proofs::(kzg, blobs.iter().collect(), &proofs, &commitments); + + assert!(result.is_ok()); } #[track_caller] fn test_build_data_columns_empty(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 0; - let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); assert!(column_sidecars.is_empty()); } #[track_caller] fn test_build_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 6; - let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, 
proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); let block_kzg_commitments = signed_block .message() @@ -448,10 +523,12 @@ mod test { #[track_caller] fn test_reconstruct_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 6; - let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); // Now reconstruct let reconstructed_columns = reconstruct_data_columns( @@ -469,10 +546,12 @@ mod test { #[track_caller] fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) { let num_of_blobs = 6; - let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); let column_sidecars = - blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); // Now reconstruct let signed_blinded_block = signed_block.into(); @@ -504,11 +583,15 @@ mod test { Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg") } - fn create_test_block_and_blobs( + fn create_test_fulu_block_and_blobs( num_of_blobs: usize, spec: &ChainSpec, - ) -> (SignedBeaconBlock, BlobsList) { - let mut block = BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)); + ) -> ( + SignedBeaconBlock>, + BlobsList, + KzgProofs, + ) { + let mut block = BeaconBlock::Fulu(BeaconBlockFulu::empty(spec)); let mut body = block.body_mut(); let blob_kzg_commitments = body.blob_kzg_commitments_mut().unwrap(); *blob_kzg_commitments = @@ -516,12 +599,12 @@ mod test 
{ .unwrap(); let mut signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); - - let (blobs_bundle, _) = generate_blobs::(num_of_blobs).unwrap(); + let fork = signed_block.fork_name_unchecked(); + let (blobs_bundle, _) = generate_blobs::(num_of_blobs, fork).unwrap(); let BlobsBundle { blobs, commitments, - proofs: _, + proofs, } = blobs_bundle; *signed_block @@ -530,6 +613,6 @@ mod test { .blob_kzg_commitments_mut() .unwrap() = commitments; - (signed_block, blobs) + (signed_block, blobs, proofs) } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 48168aeb02..5b79312d37 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -33,7 +33,6 @@ pub mod fork_choice_signal; pub mod fork_revert; pub mod fulu_readiness; pub mod graffiti_calculator; -mod head_tracker; pub mod historical_blocks; pub mod kzg_utils; pub mod light_client_finality_update_verification; @@ -56,6 +55,7 @@ pub mod schema_change; pub mod shuffling_cache; pub mod single_attestation; pub mod state_advance_timer; +pub mod summaries_dag; pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index d1c7a2a5df..57012161ec 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -601,12 +601,6 @@ pub static BALANCES_CACHE_MISSES: LazyLock> = LazyLock::new(| /* * Persisting BeaconChain components to disk */ -pub static PERSIST_HEAD: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_persist_head", - "Time taken to persist the canonical head", - ) -}); pub static PERSIST_OP_POOL: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_persist_op_pool", @@ -1662,28 +1656,37 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = LazyLock::new(|| { try_create_int_counter( "beacon_blobs_from_el_hit_total", - 
"Number of blob batches fetched from the execution layer", + "Number of non-empty blob batches fetched from the execution layer", ) }); pub static BLOBS_FROM_EL_MISS_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( "beacon_blobs_from_el_miss_total", - "Number of blob batches failed to fetch from the execution layer", + "Number of empty or incomplete blob responses from the execution layer", ) }); -pub static BLOBS_FROM_EL_EXPECTED_TOTAL: LazyLock> = LazyLock::new(|| { +pub static BLOBS_FROM_EL_ERROR_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_blobs_from_el_expected_total", + "beacon_blobs_from_el_error_total", + "Number of failed blob fetches from the execution layer", + ) +}); + +pub static BLOBS_FROM_EL_EXPECTED: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_blobs_from_el_expected", "Number of blobs expected from the execution layer", + Ok(vec![0.0, 3.0, 6.0, 9.0, 12.0, 18.0, 24.0, 30.0]), ) }); -pub static BLOBS_FROM_EL_RECEIVED_TOTAL: LazyLock> = LazyLock::new(|| { - try_create_int_counter( +pub static BLOBS_FROM_EL_RECEIVED: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( "beacon_blobs_from_el_received_total", "Number of blobs fetched from the execution layer", + linear_buckets(0.0, 4.0, 20), ) }); diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index cda5b34103..94fa0a1890 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -1,22 +1,16 @@ -use crate::beacon_chain::BEACON_CHAIN_DB_KEY; use crate::errors::BeaconChainError; -use crate::head_tracker::{HeadTracker, SszHeadTracker}; -use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; +use crate::summaries_dag::{DAGStateSummaryV22, Error as SummariesDagError, StateSummariesDAG}; use parking_lot::Mutex; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::mem; 
use std::sync::{mpsc, Arc}; use std::thread; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::{migrate_database, HotColdDBError}; -use store::iter::RootsIterator; -use store::{Error, ItemStore, StoreItem, StoreOp}; +use store::{Error, ItemStore, StoreOp}; pub use store::{HotColdDB, MemoryStore}; use tracing::{debug, error, info, warn}; -use types::{ - BeaconState, BeaconStateError, BeaconStateHash, Checkpoint, Epoch, EthSpec, FixedBytesExtended, - Hash256, SignedBeaconBlockHash, Slot, -}; +use types::{BeaconState, BeaconStateHash, Checkpoint, Epoch, EthSpec, Hash256, Slot}; /// Compact at least this frequently, finalization permitting (7 days). const MAX_COMPACTION_PERIOD_SECONDS: u64 = 604800; @@ -42,8 +36,6 @@ pub struct BackgroundMigrator, Cold: ItemStore> prev_migration: Arc>, #[allow(clippy::type_complexity)] tx_thread: Option, thread::JoinHandle<()>)>>, - /// Genesis block root, for persisting the `PersistedBeaconChain`. - genesis_block_root: Hash256, } #[derive(Debug, Clone, PartialEq, Eq)] @@ -89,7 +81,7 @@ pub struct PrevMigration { pub enum PruningOutcome { /// The pruning succeeded and updated the pruning checkpoint from `old_finalized_checkpoint`. Successful { - old_finalized_checkpoint: Checkpoint, + old_finalized_checkpoint_epoch: Epoch, }, /// The run was aborted because the new finalized checkpoint is older than the previous one. OutOfOrderFinalization { @@ -116,6 +108,11 @@ pub enum PruningError { }, UnexpectedEqualStateRoots, UnexpectedUnequalStateRoots, + MissingSummaryForFinalizedCheckpoint(Hash256), + MissingBlindedBlock(Hash256), + SummariesDagError(&'static str, SummariesDagError), + EmptyFinalizedStates, + EmptyFinalizedBlocks, } /// Message sent to the migration thread containing the information it needs to run. 
@@ -130,25 +127,17 @@ pub enum Notification { pub struct ManualFinalizationNotification { pub state_root: BeaconStateHash, pub checkpoint: Checkpoint, - pub head_tracker: Arc, - pub genesis_block_root: Hash256, } pub struct FinalizationNotification { pub finalized_state_root: BeaconStateHash, pub finalized_checkpoint: Checkpoint, - pub head_tracker: Arc, pub prev_migration: Arc>, - pub genesis_block_root: Hash256, } impl, Cold: ItemStore> BackgroundMigrator { /// Create a new `BackgroundMigrator` and spawn its thread if necessary. - pub fn new( - db: Arc>, - config: MigratorConfig, - genesis_block_root: Hash256, - ) -> Self { + pub fn new(db: Arc>, config: MigratorConfig) -> Self { // Estimate last migration run from DB split slot. let prev_migration = Arc::new(Mutex::new(PrevMigration { epoch: db.get_split_slot().epoch(E::slots_per_epoch()), @@ -163,7 +152,6 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator, ) -> Result<(), BeaconChainError> { let notif = FinalizationNotification { finalized_state_root, finalized_checkpoint, - head_tracker, prev_migration: self.prev_migration.clone(), - genesis_block_root: self.genesis_block_root, }; // Send to background thread if configured, otherwise run in foreground. 
@@ -314,9 +299,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator {} + Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { + debug!( + slot = slot.as_u64(), + "Database migration postponed, unaligned finalized block" + ); + } + Err(e) => { + warn!(error = ?e, "Database migration failed"); + return; + } + }; + + let old_finalized_checkpoint_epoch = match Self::prune_hot_db( + db.clone(), + finalized_state_root.into(), &finalized_state, notif.finalized_checkpoint, - notif.genesis_block_root, ) { Ok(PruningOutcome::Successful { - old_finalized_checkpoint, - }) => old_finalized_checkpoint, + old_finalized_checkpoint_epoch, + }) => old_finalized_checkpoint_epoch, Ok(PruningOutcome::DeferredConcurrentHeadTrackerMutation) => { warn!( message = "this is expected only very rarely!", @@ -391,26 +391,10 @@ impl, Cold: ItemStore> BackgroundMigrator { - warn!(error = ?e,"Block pruning failed"); - return; - } - }; - - match migrate_database( - db.clone(), - finalized_state_root.into(), - finalized_block_root, - &finalized_state, - ) { - Ok(()) => {} - Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { - debug!( - slot = slot.as_u64(), - "Database migration postponed, unaligned finalized block" + warn!( + error = ?e, + "Hot DB pruning failed" ); - } - Err(e) => { - warn!(error = ?e, "Database migration failed"); return; } }; @@ -418,7 +402,7 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator>, - head_tracker: Arc, - new_finalized_state_hash: BeaconStateHash, + new_finalized_state_root: Hash256, new_finalized_state: &BeaconState, new_finalized_checkpoint: Checkpoint, - genesis_block_root: Hash256, ) -> Result { - let old_finalized_checkpoint = - store - .load_pruning_checkpoint()? 
- .unwrap_or_else(|| Checkpoint { - epoch: Epoch::new(0), - root: Hash256::zero(), - }); - - let old_finalized_slot = old_finalized_checkpoint - .epoch - .start_slot(E::slots_per_epoch()); let new_finalized_slot = new_finalized_checkpoint .epoch .start_slot(E::slots_per_epoch()); - let new_finalized_block_hash = new_finalized_checkpoint.root.into(); // The finalized state must be for the epoch boundary slot, not the slot of the finalized // block. @@ -549,200 +518,220 @@ impl, Cold: ItemStore> BackgroundMigrator new_finalized_slot { - return Ok(PruningOutcome::OutOfOrderFinalization { - old_finalized_checkpoint, - new_finalized_checkpoint, - }); - } - debug!( - old_finalized_epoch = %old_finalized_checkpoint.epoch, - new_finalized_epoch = %new_finalized_checkpoint.epoch, + new_finalized_checkpoint = ?new_finalized_checkpoint, + new_finalized_state_root = %new_finalized_state_root, "Starting database pruning" ); - // For each slot between the new finalized checkpoint and the old finalized checkpoint, - // collect the beacon block root and state root of the canonical chain. - let newly_finalized_chain: HashMap = - std::iter::once(Ok(( - new_finalized_slot, - (new_finalized_block_hash, new_finalized_state_hash), - ))) - .chain(RootsIterator::new(&store, new_finalized_state).map(|res| { - res.map(|(block_root, state_root, slot)| { - (slot, (block_root.into(), state_root.into())) + + let state_summaries_dag = { + let state_summaries = store + .load_hot_state_summaries()? + .into_iter() + .map(|(state_root, summary)| { + let block_root = summary.latest_block_root; + // This error should never happen unless we break a DB invariant + let block = store + .get_blinded_block(&block_root)? 
+ .ok_or(PruningError::MissingBlindedBlock(block_root))?; + Ok(( + state_root, + DAGStateSummaryV22 { + slot: summary.slot, + latest_block_root: summary.latest_block_root, + block_slot: block.slot(), + block_parent_root: block.parent_root(), + }, + )) }) - })) - .take_while(|res| { - res.as_ref() - .map_or(true, |(slot, _)| *slot >= old_finalized_slot) - }) - .collect::>()?; + .collect::, BeaconChainError>>()?; + + // De-duplicate block roots to reduce block reads below + let summary_block_roots = HashSet::::from_iter( + state_summaries + .iter() + .map(|(_, summary)| summary.latest_block_root), + ); + + // Sanity check, there is at least one summary with the new finalized block root + if !summary_block_roots.contains(&new_finalized_checkpoint.root) { + return Err(BeaconChainError::PruningError( + PruningError::MissingSummaryForFinalizedCheckpoint( + new_finalized_checkpoint.root, + ), + )); + } + + StateSummariesDAG::new_from_v22(state_summaries) + .map_err(|e| PruningError::SummariesDagError("new StateSumariesDAG", e))? + }; + + // To debug faulty trees log if we unexpectedly have more than one root. These trees may not + // result in an error, as they may not be queried in the codepaths below. + let state_summaries_dag_roots = state_summaries_dag.tree_roots(); + if state_summaries_dag_roots.len() > 1 { + warn!( + state_summaries_dag_roots = ?state_summaries_dag_roots, + "Prune state summaries dag found more than one root" + ); + } + + // `new_finalized_state_root` is the *state at the slot of the finalized epoch*, + // rather than the state of the latest finalized block. These two values will only + // differ when the first slot of the finalized epoch is a skip slot. 
+ let finalized_and_descendant_state_roots_of_finalized_checkpoint = + HashSet::::from_iter( + std::iter::once(new_finalized_state_root).chain( + state_summaries_dag + .descendants_of(&new_finalized_state_root) + .map_err(|e| PruningError::SummariesDagError("descendants of", e))?, + ), + ); + + // Collect all `latest_block_roots` of the + // finalized_and_descendant_state_roots_of_finalized_checkpoint set. Includes the finalized + // block as `new_finalized_state_root` always has a latest block root equal to the finalized + // block. + let finalized_and_descendant_block_roots_of_finalized_checkpoint = + HashSet::::from_iter( + state_summaries_dag + .blocks_of_states( + finalized_and_descendant_state_roots_of_finalized_checkpoint.iter(), + ) + // should never error, we just constructed + // finalized_and_descendant_state_roots_of_finalized_checkpoint from the + // state_summaries_dag + .map_err(|e| PruningError::SummariesDagError("blocks of descendant", e))? + .into_iter() + .map(|(block_root, _)| block_root), + ); + + // Note: ancestors_of includes the finalized state root + let newly_finalized_state_summaries = state_summaries_dag + .ancestors_of(new_finalized_state_root) + .map_err(|e| PruningError::SummariesDagError("ancestors of", e))?; + let newly_finalized_state_roots = newly_finalized_state_summaries + .iter() + .map(|(root, _)| *root) + .collect::>(); + let newly_finalized_states_min_slot = *newly_finalized_state_summaries + .iter() + .map(|(_, slot)| slot) + .min() + .ok_or(PruningError::EmptyFinalizedStates)?; + + // Note: ancestors_of includes the finalized block + let newly_finalized_blocks = state_summaries_dag + .blocks_of_states(newly_finalized_state_roots.iter()) + .map_err(|e| PruningError::SummariesDagError("blocks of newly finalized", e))?; // We don't know which blocks are shared among abandoned chains, so we buffer and delete // everything in one fell swoop. 
- let mut abandoned_blocks: HashSet = HashSet::new(); - let mut abandoned_states: HashSet<(Slot, BeaconStateHash)> = HashSet::new(); - let mut abandoned_heads: HashSet = HashSet::new(); + let mut blocks_to_prune: HashSet = HashSet::new(); + let mut states_to_prune: HashSet<(Slot, Hash256)> = HashSet::new(); - let heads = head_tracker.heads(); - debug!( - old_finalized_root = ?old_finalized_checkpoint.root, - new_finalized_root = ?new_finalized_checkpoint.root, - head_count = heads.len(), - "Extra pruning information" - ); + // Consider the following block tree where we finalize block `[0]` at the checkpoint `(f)`. + // There's a block `[3]` that descendends from the finalized block but NOT from the + // finalized checkpoint. The block tree rooted in `[3]` conflicts with finality and must be + // pruned. Therefore we collect all state summaries descendant of `(f)`. + // + // finalize epoch boundary + // | /-------[2]----- + // [0]-------|--(f)--[1]---------- + // \---[3]--|-----------------[4] + // | - for (head_hash, head_slot) in heads { - // Load head block. If it fails with a decode error, it's likely a reverted block, - // so delete it from the head tracker but leave it and its states in the database - // This is suboptimal as it wastes disk space, but it's difficult to fix. A re-sync - // can be used to reclaim the space. 
- let head_state_root = match store.get_blinded_block(&head_hash) { - Ok(Some(block)) => block.state_root(), - Ok(None) => { - return Err(BeaconStateError::MissingBeaconBlock(head_hash.into()).into()) + for (_, summaries) in state_summaries_dag.summaries_by_slot_ascending() { + for (state_root, summary) in summaries { + let should_prune = if finalized_and_descendant_state_roots_of_finalized_checkpoint + .contains(&state_root) + { + // This state is a viable descendant of the finalized checkpoint, so does not + // conflict with finality and can be built on or become a head + false + } else { + // Everything else, prune + true + }; + + if should_prune { + // States are migrated into the cold DB in the migrate step. All hot states + // prior to finalized can be pruned from the hot DB columns + states_to_prune.insert((summary.slot, state_root)); } - Err(Error::SszDecodeError(e)) => { - warn!( - block_root = ?head_hash, - error = ?e, - "Forgetting invalid head block" - ); - abandoned_heads.insert(head_hash); - continue; - } - Err(e) => return Err(e.into()), + } + } + + for (block_root, slot) in state_summaries_dag.iter_blocks() { + // Blocks both finalized and unfinalized are in the same DB column. We must only + // prune blocks from abandoned forks. Note that block pruning and state pruning differ. + // The blocks DB column is shared for hot and cold data, while the states have different + // columns. Thus, we only prune unviable blocks or from abandoned forks. + let should_prune = if finalized_and_descendant_block_roots_of_finalized_checkpoint + .contains(&block_root) + { + // Keep unfinalized blocks descendant of finalized checkpoint + finalized block + // itself Note that we anchor this set on the finalized checkpoint instead of the + // finalized block. A diagram above shows a relevant example. 
+ false + } else if newly_finalized_blocks.contains(&(block_root, slot)) { + // Keep recently finalized blocks + false + } else if slot < newly_finalized_states_min_slot { + // Keep recently finalized blocks that we know are canonical. Blocks with slots < + // `newly_finalized_states_min_slot` we don't have canonical information so we + // assume they are part of the finalized pruned chain + // + // Pruning these would risk breaking the DB by deleting canonical blocks once the + // HDiff grid advances. If the pruning routine is correct this condition should + // never be hit. + false + } else { + // Everything else, prune + true }; - let mut potentially_abandoned_head = Some(head_hash); - let mut potentially_abandoned_blocks = vec![]; - - // Iterate backwards from this head, staging blocks and states for deletion. - let iter = std::iter::once(Ok((head_hash, head_state_root, head_slot))) - .chain(RootsIterator::from_block(&store, head_hash)?); - - for maybe_tuple in iter { - let (block_root, state_root, slot) = maybe_tuple?; - let block_root = SignedBeaconBlockHash::from(block_root); - let state_root = BeaconStateHash::from(state_root); - - match newly_finalized_chain.get(&slot) { - // If there's no information about a slot on the finalized chain, then - // it should be because it's ahead of the new finalized slot. Stage - // the fork's block and state for possible deletion. - None => { - if slot > new_finalized_slot { - potentially_abandoned_blocks.push(( - slot, - Some(block_root), - Some(state_root), - )); - } else if slot >= old_finalized_slot { - return Err(PruningError::MissingInfoForCanonicalChain { slot }.into()); - } else { - // We must assume here any candidate chains include the old finalized - // checkpoint, i.e. there aren't any forks starting at a block that is a - // strict ancestor of old_finalized_checkpoint. 
- warn!( - head_block_root = ?head_hash, - %head_slot, - "Found a chain that should already have been pruned" - ); - potentially_abandoned_head.take(); - break; - } - } - Some((finalized_block_root, finalized_state_root)) => { - // This fork descends from a newly finalized block, we can stop. - if block_root == *finalized_block_root { - // Sanity check: if the slot and block root match, then the - // state roots should match too. - if state_root != *finalized_state_root { - return Err(PruningError::UnexpectedUnequalStateRoots.into()); - } - - // If the fork descends from the whole finalized chain, - // do not prune it. Otherwise continue to delete all - // of the blocks and states that have been staged for - // deletion so far. - if slot == new_finalized_slot { - potentially_abandoned_blocks.clear(); - potentially_abandoned_head.take(); - } - // If there are skipped slots on the fork to be pruned, then - // we will have just staged the common block for deletion. - // Unstage it. - else { - for (_, block_root, _) in - potentially_abandoned_blocks.iter_mut().rev() - { - if block_root.as_ref() == Some(finalized_block_root) { - *block_root = None; - } else { - break; - } - } - } - break; - } else { - if state_root == *finalized_state_root { - return Err(PruningError::UnexpectedEqualStateRoots.into()); - } - potentially_abandoned_blocks.push(( - slot, - Some(block_root), - Some(state_root), - )); - } - } - } - } - - if let Some(abandoned_head) = potentially_abandoned_head { - debug!( - head_block_root = ?abandoned_head, - %head_slot, - "Pruning head" - ); - abandoned_heads.insert(abandoned_head); - abandoned_blocks.extend( - potentially_abandoned_blocks - .iter() - .filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash), - ); - abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map( - |(slot, _, maybe_state_hash)| maybe_state_hash.map(|sr| (*slot, sr)), - )); + if should_prune { + blocks_to_prune.insert(block_root); } } - // Update the head tracker 
before the database, so that we maintain the invariant - // that a block present in the head tracker is present in the database. - // See https://github.com/sigp/lighthouse/issues/1557 - let mut head_tracker_lock = head_tracker.0.write(); + // Sort states to prune to make it more readable + let mut states_to_prune = states_to_prune.into_iter().collect::>(); + states_to_prune.sort_by_key(|(slot, _)| *slot); - // Check that all the heads to be deleted are still present. The absence of any - // head indicates a race, that will likely resolve itself, so we defer pruning until - // later. - for head_hash in &abandoned_heads { - if !head_tracker_lock.contains_key(head_hash) { - return Ok(PruningOutcome::DeferredConcurrentHeadTrackerMutation); - } + debug!( + new_finalized_checkpoint = ?new_finalized_checkpoint, + newly_finalized_blocks = newly_finalized_blocks.len(), + newly_finalized_state_roots = newly_finalized_state_roots.len(), + newly_finalized_states_min_slot = %newly_finalized_states_min_slot, + state_summaries_count = state_summaries_dag.summaries_count(), + state_summaries_dag_roots = ?state_summaries_dag_roots, + finalized_and_descendant_state_roots_of_finalized_checkpoint = finalized_and_descendant_state_roots_of_finalized_checkpoint.len(), + finalized_and_descendant_block_roots_of_finalized_checkpoint = finalized_and_descendant_block_roots_of_finalized_checkpoint.len(), + blocks_to_prune = blocks_to_prune.len(), + states_to_prune = states_to_prune.len(), + "Extra pruning information" + ); + // Don't log the full `states_to_prune` in the log statement above as it can result in a + // single log line of +1Kb and break logging setups. + for block_root in &blocks_to_prune { + debug!( + block_root = ?block_root, + "Pruning block" + ); + } + for (slot, state_root) in &states_to_prune { + debug!( + ?state_root, + %slot, + "Pruning hot state" + ); } - // Then remove them for real. 
- for head_hash in abandoned_heads { - head_tracker_lock.remove(&head_hash); - } - - let mut batch: Vec> = abandoned_blocks + let mut batch: Vec> = blocks_to_prune .into_iter() - .map(Into::into) - .flat_map(|block_root: Hash256| { + .flat_map(|block_root| { [ StoreOp::DeleteBlock(block_root), StoreOp::DeleteExecutionPayload(block_root), @@ -750,43 +739,87 @@ impl, Cold: ItemStore> BackgroundMigrator>, + ) { + for (block_root, slot) in finalized_blocks { + // Delete the execution payload if payload pruning is enabled. At a skipped slot we may + // delete the payload for the finalized block itself, but that's OK as we only guarantee + // that payloads are present for slots >= the split slot. + if *slot < new_finalized_slot { + hot_db_ops.push(StoreOp::DeleteExecutionPayload(*block_root)); + } + } + } + + fn prune_non_checkpoint_sync_committee_branches( + finalized_blocks_desc: &[(Hash256, Slot)], + hot_db_ops: &mut Vec>, + ) { + let mut epoch_boundary_blocks = HashSet::new(); + let mut non_checkpoint_block_roots = HashSet::new(); + + // Then, iterate states in slot ascending order, as they are stored wrt previous states. + for (block_root, slot) in finalized_blocks_desc.iter().rev() { + // At a missed slot, `state_root_iter` will return the block root + // from the previous non-missed slot. This ensures that the block root at an + // epoch boundary is always a checkpoint block root. We keep track of block roots + // at epoch boundaries by storing them in the `epoch_boundary_blocks` hash set. + // We then ensure that block roots at the epoch boundary aren't included in the + // `non_checkpoint_block_roots` hash set. + if *slot % E::slots_per_epoch() == 0 { + epoch_boundary_blocks.insert(block_root); + } else { + non_checkpoint_block_roots.insert(block_root); + } + + if epoch_boundary_blocks.contains(&block_root) { + non_checkpoint_block_roots.remove(&block_root); + } + } + + // Prune sync committee branch data for all non checkpoint block roots. 
+ // Note that `non_checkpoint_block_roots` should only contain non checkpoint block roots + // as long as `finalized_state.slot()` is at an epoch boundary. If this were not the case + // we risk the chance of pruning a `sync_committee_branch` for a checkpoint block root. + // E.g. if `current_split_slot` = (Epoch A slot 0) and `finalized_state.slot()` = (Epoch C slot 31) + // and (Epoch D slot 0) is a skipped slot, we will have pruned a `sync_committee_branch` + // for a checkpoint block root. + non_checkpoint_block_roots + .into_iter() + .for_each(|block_root| { + hot_db_ops.push(StoreOp::DeleteSyncCommitteeBranch(*block_root)); + }); + } + /// Compact the database if it has been more than `COMPACTION_PERIOD_SECONDS` since it /// was last compacted. pub fn run_compaction( diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index adb68def0d..83affb0dcd 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -1,24 +1,11 @@ -use crate::head_tracker::SszHeadTracker; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error as StoreError, StoreItem}; use types::Hash256; -/// Dummy value to use for the canonical head block root, see below. 
-pub const DUMMY_CANONICAL_HEAD_BLOCK_ROOT: Hash256 = Hash256::repeat_byte(0xff); - #[derive(Clone, Encode, Decode)] pub struct PersistedBeaconChain { - /// This value is ignored to resolve the issue described here: - /// - /// https://github.com/sigp/lighthouse/pull/1639 - /// - /// Its removal is tracked here: - /// - /// https://github.com/sigp/lighthouse/issues/1784 - pub _canonical_head_block_root: Hash256, pub genesis_block_root: Hash256, - pub ssz_head_tracker: SszHeadTracker, } impl StoreItem for PersistedBeaconChain { diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index ccfae1b182..49aa116f6c 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -2,6 +2,7 @@ mod migration_schema_v20; mod migration_schema_v21; mod migration_schema_v22; +mod migration_schema_v23; use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; @@ -57,6 +58,14 @@ pub fn migrate_schema( // bumped inside the upgrade_to_v22 fn migration_schema_v22::upgrade_to_v22::(db.clone(), genesis_state_root) } + (SchemaVersion(22), SchemaVersion(23)) => { + let ops = migration_schema_v23::upgrade_to_v23::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(23), SchemaVersion(22)) => { + let ops = migration_schema_v23::downgrade_from_v23::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs new file mode 100644 index 0000000000..e66178df53 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs @@ -0,0 +1,147 @@ +use crate::beacon_chain::BeaconChainTypes; +use crate::persisted_fork_choice::PersistedForkChoice; +use crate::schema_change::StoreError; +use crate::test_utils::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY}; +use crate::BeaconForkChoiceStore; +use fork_choice::{ForkChoice, ResetPayloadStatuses}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::sync::Arc; +use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem}; +use types::{Hash256, Slot}; + +/// Dummy value to use for the canonical head block root, see below. +pub const DUMMY_CANONICAL_HEAD_BLOCK_ROOT: Hash256 = Hash256::repeat_byte(0xff); + +pub fn upgrade_to_v23( + db: Arc>, +) -> Result, Error> { + // 1) Set the head-tracker to empty + let Some(persisted_beacon_chain_v22) = + db.get_item::(&BEACON_CHAIN_DB_KEY)? + else { + return Err(Error::MigrationError( + "No persisted beacon chain found in DB. Datadir could be incorrect or DB could be corrupt".to_string() + )); + }; + + let persisted_beacon_chain = PersistedBeaconChain { + genesis_block_root: persisted_beacon_chain_v22.genesis_block_root, + }; + + let mut ops = vec![persisted_beacon_chain.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; + + // 2) Wipe out all state temporary flags. While un-used in V23, if there's a rollback we could + // end-up with an inconsistent DB. 
+ for state_root_result in db + .hot_db + .iter_column_keys::(DBColumn::BeaconStateTemporary) + { + ops.push(KeyValueStoreOp::DeleteKey( + DBColumn::BeaconStateTemporary, + state_root_result?.as_slice().to_vec(), + )); + } + + Ok(ops) +} + +pub fn downgrade_from_v23( + db: Arc>, +) -> Result, Error> { + let Some(persisted_beacon_chain) = db.get_item::(&BEACON_CHAIN_DB_KEY)? + else { + // The `PersistedBeaconChain` must exist if fork choice exists. + return Err(Error::MigrationError( + "No persisted beacon chain found in DB. Datadir could be incorrect or DB could be corrupt".to_string(), + )); + }; + + // Recreate head-tracker from fork choice. + let Some(persisted_fork_choice) = db.get_item::(&FORK_CHOICE_DB_KEY)? + else { + // Fork choice should exist if the database exists. + return Err(Error::MigrationError( + "No fork choice found in DB".to_string(), + )); + }; + + let fc_store = + BeaconForkChoiceStore::from_persisted(persisted_fork_choice.fork_choice_store, db.clone()) + .map_err(|e| { + Error::MigrationError(format!( + "Error loading fork choice store from persisted: {e:?}" + )) + })?; + + // Doesn't matter what policy we use for invalid payloads, as our head calculation just + // considers descent from finalization. 
+ let reset_payload_statuses = ResetPayloadStatuses::OnlyWithInvalidPayload; + let fork_choice = ForkChoice::from_persisted( + persisted_fork_choice.fork_choice, + reset_payload_statuses, + fc_store, + &db.spec, + ) + .map_err(|e| { + Error::MigrationError(format!("Error loading fork choice from persisted: {e:?}")) + })?; + + let heads = fork_choice + .proto_array() + .heads_descended_from_finalization::(); + + let head_roots = heads.iter().map(|node| node.root).collect(); + let head_slots = heads.iter().map(|node| node.slot).collect(); + + let persisted_beacon_chain_v22 = PersistedBeaconChainV22 { + _canonical_head_block_root: DUMMY_CANONICAL_HEAD_BLOCK_ROOT, + genesis_block_root: persisted_beacon_chain.genesis_block_root, + ssz_head_tracker: SszHeadTracker { + roots: head_roots, + slots: head_slots, + }, + }; + + let ops = vec![persisted_beacon_chain_v22.as_kv_store_op(BEACON_CHAIN_DB_KEY)]; + + Ok(ops) +} + +/// Helper struct that is used to encode/decode the state of the `HeadTracker` as SSZ bytes. +/// +/// This is used when persisting the state of the `BeaconChain` to disk. 
+#[derive(Encode, Decode, Clone)] +pub struct SszHeadTracker { + roots: Vec, + slots: Vec, +} + +#[derive(Clone, Encode, Decode)] +pub struct PersistedBeaconChainV22 { + /// This value is ignored to resolve the issue described here: + /// + /// https://github.com/sigp/lighthouse/pull/1639 + /// + /// Its removal is tracked here: + /// + /// https://github.com/sigp/lighthouse/issues/1784 + pub _canonical_head_block_root: Hash256, + pub genesis_block_root: Hash256, + /// DEPRECATED + pub ssz_head_tracker: SszHeadTracker, +} + +impl StoreItem for PersistedBeaconChainV22 { + fn db_column() -> DBColumn { + DBColumn::BeaconChain + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Self::from_ssz_bytes(bytes).map_err(Into::into) + } +} diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index f4216ef76d..9135c3ce88 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -23,7 +23,6 @@ use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; -use store::KeyValueStore; use task_executor::TaskExecutor; use tokio::time::{sleep, sleep_until, Instant}; use tracing::{debug, error, warn}; @@ -297,7 +296,7 @@ fn advance_head(beacon_chain: &Arc>) -> Resu // Protect against advancing a state more than a single slot. // // Advancing more than one slot without storing the intermediate state would corrupt the - // database. Future works might store temporary, intermediate states inside this function. + // database. Future works might store intermediate states inside this function. 
match state.slot().cmp(&state.latest_block_header().slot) { std::cmp::Ordering::Equal => (), std::cmp::Ordering::Greater => { @@ -432,20 +431,13 @@ fn advance_head(beacon_chain: &Arc>) -> Resu ); } - // Write the advanced state to the database with a temporary flag that will be deleted when - // a block is imported on top of this state. We should delete this once we bring in the DB - // changes from tree-states that allow us to prune states without temporary flags. + // Write the advanced state to the database. + // We no longer use a transaction lock here when checking whether the state exists, because + // even if we race with the deletion of this state by the finalization pruning code, the worst + // case is we end up with a finalized state stored, that will get pruned the next time pruning + // runs. let advanced_state_root = state.update_tree_hash_cache()?; - let txn_lock = beacon_chain.store.hot_db.begin_rw_transaction(); - let state_already_exists = beacon_chain - .store - .load_hot_state_summary(&advanced_state_root)? 
- .is_some(); - let temporary = !state_already_exists; - beacon_chain - .store - .put_state_possibly_temporary(&advanced_state_root, &state, temporary)?; - drop(txn_lock); + beacon_chain.store.put_state(&advanced_state_root, &state)?; debug!( ?head_block_root, diff --git a/beacon_node/beacon_chain/src/summaries_dag.rs b/beacon_node/beacon_chain/src/summaries_dag.rs new file mode 100644 index 0000000000..ab379d1eb2 --- /dev/null +++ b/beacon_node/beacon_chain/src/summaries_dag.rs @@ -0,0 +1,464 @@ +use itertools::Itertools; +use std::{ + cmp::Ordering, + collections::{btree_map::Entry, BTreeMap, HashMap}, +}; +use types::{Hash256, Slot}; + +#[derive(Debug, Clone, Copy)] +pub struct DAGStateSummary { + pub slot: Slot, + pub latest_block_root: Hash256, + pub latest_block_slot: Slot, + pub previous_state_root: Hash256, +} + +#[derive(Debug, Clone, Copy)] +pub struct DAGStateSummaryV22 { + pub slot: Slot, + pub latest_block_root: Hash256, + pub block_slot: Slot, + pub block_parent_root: Hash256, +} + +pub struct StateSummariesDAG { + // state_root -> state_summary + state_summaries_by_state_root: HashMap, + // block_root -> state slot -> (state_root, state summary) + state_summaries_by_block_root: HashMap>, + // parent_state_root -> Vec + // cached value to prevent having to recompute in each recursive call into `descendants_of` + child_state_roots: HashMap>, +} + +#[derive(Debug)] +pub enum Error { + DuplicateStateSummary { + block_root: Hash256, + existing_state_summary: Box<(Slot, Hash256)>, + new_state_summary: (Slot, Hash256), + }, + MissingStateSummary(Hash256), + MissingStateSummaryByBlockRoot { + state_root: Hash256, + latest_block_root: Hash256, + }, + StateSummariesNotContiguous { + state_root: Hash256, + state_slot: Slot, + latest_block_root: Hash256, + parent_block_root: Box, + parent_block_latest_state_summary: Box>, + }, + MissingChildStateRoot(Hash256), + RequestedSlotAboveSummary { + starting_state_root: Hash256, + ancestor_slot: Slot, + state_root: 
Hash256, + state_slot: Slot, + }, + RootUnknownPreviousStateRoot(Slot, Hash256), + RootUnknownAncestorStateRoot { + starting_state_root: Hash256, + ancestor_slot: Slot, + root_state_root: Hash256, + root_state_slot: Slot, + }, +} + +impl StateSummariesDAG { + pub fn new(state_summaries: Vec<(Hash256, DAGStateSummary)>) -> Result { + // Group them by latest block root, and sorted state slot + let mut state_summaries_by_state_root = HashMap::new(); + let mut state_summaries_by_block_root = HashMap::<_, BTreeMap<_, _>>::new(); + let mut child_state_roots = HashMap::<_, Vec<_>>::new(); + + for (state_root, summary) in state_summaries.into_iter() { + let summaries = state_summaries_by_block_root + .entry(summary.latest_block_root) + .or_default(); + + // Sanity check to ensure no duplicate summaries for the tuple (block_root, state_slot) + match summaries.entry(summary.slot) { + Entry::Vacant(entry) => { + entry.insert((state_root, summary)); + } + Entry::Occupied(existing) => { + return Err(Error::DuplicateStateSummary { + block_root: summary.latest_block_root, + existing_state_summary: (*existing.key(), existing.get().0).into(), + new_state_summary: (summary.slot, state_root), + }) + } + } + + state_summaries_by_state_root.insert(state_root, summary); + + child_state_roots + .entry(summary.previous_state_root) + .or_default() + .push(state_root); + // Add empty entry for the child state + child_state_roots.entry(state_root).or_default(); + } + + Ok(Self { + state_summaries_by_state_root, + state_summaries_by_block_root, + child_state_roots, + }) + } + + /// Computes a DAG from a sequence of state summaries, including their parent block + /// relationships. + /// + /// - Expects summaries to be contiguous per slot: there must exist a summary at every slot + /// of each tree branch + /// - Maybe include multiple disjoint trees. The root of each tree will have a ZERO parent state + /// root, which will error later when calling `previous_state_root`. 
+ pub fn new_from_v22( + state_summaries_v22: Vec<(Hash256, DAGStateSummaryV22)>, + ) -> Result { + // Group them by latest block root, and sorted state slot + let mut state_summaries_by_block_root = HashMap::<_, BTreeMap<_, _>>::new(); + for (state_root, summary) in state_summaries_v22.iter() { + let summaries = state_summaries_by_block_root + .entry(summary.latest_block_root) + .or_default(); + + // Sanity check to ensure no duplicate summaries for the tuple (block_root, state_slot) + match summaries.entry(summary.slot) { + Entry::Vacant(entry) => { + entry.insert((state_root, summary)); + } + Entry::Occupied(existing) => { + return Err(Error::DuplicateStateSummary { + block_root: summary.latest_block_root, + existing_state_summary: (*existing.key(), *existing.get().0).into(), + new_state_summary: (summary.slot, *state_root), + }) + } + } + } + + let state_summaries = state_summaries_v22 + .iter() + .map(|(state_root, summary)| { + let previous_state_root = if summary.slot == 0 { + Hash256::ZERO + } else { + let previous_slot = summary.slot - 1; + + // Check the set of states in the same state's block root + let same_block_root_summaries = state_summaries_by_block_root + .get(&summary.latest_block_root) + // Should never error: we construct the HashMap here and must have at least + // one entry per block root + .ok_or(Error::MissingStateSummaryByBlockRoot { + state_root: *state_root, + latest_block_root: summary.latest_block_root, + })?; + if let Some((state_root, _)) = same_block_root_summaries.get(&previous_slot) { + // Skipped slot: block root at previous slot is the same as latest block root. + **state_root + } else { + // Common case: not a skipped slot. 
+ let parent_block_root = summary.block_parent_root; + if let Some(parent_block_summaries) = + state_summaries_by_block_root.get(&parent_block_root) + { + *parent_block_summaries + .get(&previous_slot) + // Should never error: summaries are contiguous, so if there's an + // entry it must contain at least one summary at the previous slot. + .ok_or(Error::StateSummariesNotContiguous { + state_root: *state_root, + state_slot: summary.slot, + latest_block_root: summary.latest_block_root, + parent_block_root: parent_block_root.into(), + parent_block_latest_state_summary: parent_block_summaries + .iter() + .max_by(|a, b| a.0.cmp(b.0)) + .map(|(slot, (state_root, _))| (*slot, **state_root)) + .into(), + })? + .0 + } else { + // We don't know of any summary with this parent block root. We'll + // consider this summary to be a root of `state_summaries_v22` + // collection and mark it as zero. + // The test store_tests::finalizes_non_epoch_start_slot manages to send two + // disjoint trees on its second migration. 
+ Hash256::ZERO + } + } + }; + + Ok(( + *state_root, + DAGStateSummary { + slot: summary.slot, + latest_block_root: summary.latest_block_root, + latest_block_slot: summary.block_slot, + previous_state_root, + }, + )) + }) + .collect::, _>>()?; + + Self::new(state_summaries) + } + + // Returns all non-unique latest block roots of a given set of states + pub fn blocks_of_states<'a, I: Iterator>( + &self, + state_roots: I, + ) -> Result, Error> { + state_roots + .map(|state_root| { + let summary = self + .state_summaries_by_state_root + .get(state_root) + .ok_or(Error::MissingStateSummary(*state_root))?; + Ok((summary.latest_block_root, summary.latest_block_slot)) + }) + .collect() + } + + // Returns all unique latest blocks of this DAG's summaries + pub fn iter_blocks(&self) -> impl Iterator + '_ { + self.state_summaries_by_state_root + .values() + .map(|summary| (summary.latest_block_root, summary.latest_block_slot)) + .unique() + } + + /// Returns a vec of state summaries that have an unknown parent when forming the DAG tree + pub fn tree_roots(&self) -> Vec<(Hash256, DAGStateSummary)> { + self.state_summaries_by_state_root + .iter() + .filter_map(|(state_root, summary)| { + if self + .state_summaries_by_state_root + .contains_key(&summary.previous_state_root) + { + // Summaries with a known parent are not roots + None + } else { + Some((*state_root, *summary)) + } + }) + .collect() + } + + pub fn summaries_count(&self) -> usize { + self.state_summaries_by_block_root + .values() + .map(|s| s.len()) + .sum() + } + + pub fn summaries_by_slot_ascending(&self) -> BTreeMap> { + let mut summaries = BTreeMap::>::new(); + for (state_root, summary) in self.state_summaries_by_state_root.iter() { + summaries + .entry(summary.slot) + .or_default() + .push((*state_root, *summary)); + } + summaries + } + + pub fn previous_state_root(&self, state_root: Hash256) -> Result { + let summary = self + .state_summaries_by_state_root + .get(&state_root) + 
.ok_or(Error::MissingStateSummary(state_root))?; + if summary.previous_state_root == Hash256::ZERO { + Err(Error::RootUnknownPreviousStateRoot( + summary.slot, + state_root, + )) + } else { + Ok(summary.previous_state_root) + } + } + + pub fn ancestor_state_root_at_slot( + &self, + starting_state_root: Hash256, + ancestor_slot: Slot, + ) -> Result { + let mut state_root = starting_state_root; + // Walk backwards until we reach the state at `ancestor_slot`. + loop { + let summary = self + .state_summaries_by_state_root + .get(&state_root) + .ok_or(Error::MissingStateSummary(state_root))?; + + // Assumes all summaries are contiguous + match summary.slot.cmp(&ancestor_slot) { + Ordering::Less => { + return Err(Error::RequestedSlotAboveSummary { + starting_state_root, + ancestor_slot, + state_root, + state_slot: summary.slot, + }) + } + Ordering::Equal => { + return Ok(state_root); + } + Ordering::Greater => { + if summary.previous_state_root == Hash256::ZERO { + return Err(Error::RootUnknownAncestorStateRoot { + starting_state_root, + ancestor_slot, + root_state_root: state_root, + root_state_slot: summary.slot, + }); + } else { + state_root = summary.previous_state_root; + } + } + } + } + } + + /// Returns all ancestors of `state_root` INCLUDING `state_root` until the next parent is not + /// known. + pub fn ancestors_of(&self, mut state_root: Hash256) -> Result, Error> { + // Sanity check that the first summary exists + if !self.state_summaries_by_state_root.contains_key(&state_root) { + return Err(Error::MissingStateSummary(state_root)); + } + + let mut ancestors = vec![]; + loop { + if let Some(summary) = self.state_summaries_by_state_root.get(&state_root) { + ancestors.push((state_root, summary.slot)); + state_root = summary.previous_state_root + } else { + return Ok(ancestors); + } + } + } + + /// Returns the descendant state summary roots given an initial state root. 
+ pub fn descendants_of(&self, query_state_root: &Hash256) -> Result, Error> { + let mut descendants = vec![]; + for child_root in self + .child_state_roots + .get(query_state_root) + .ok_or(Error::MissingChildStateRoot(*query_state_root))? + { + descendants.push(*child_root); + descendants.extend(self.descendants_of(child_root)?); + } + Ok(descendants) + } +} + +#[cfg(test)] +mod tests { + use super::{DAGStateSummaryV22, Error, StateSummariesDAG}; + use bls::FixedBytesExtended; + use types::{Hash256, Slot}; + + fn root(n: u64) -> Hash256 { + Hash256::from_low_u64_le(n) + } + + #[test] + fn new_from_v22_empty() { + StateSummariesDAG::new_from_v22(vec![]).unwrap(); + } + + fn assert_previous_state_root_is_zero(dag: &StateSummariesDAG, root: Hash256) { + assert!(matches!( + dag.previous_state_root(root).unwrap_err(), + Error::RootUnknownPreviousStateRoot { .. } + )); + } + + #[test] + fn new_from_v22_one_state() { + let root_a = root(0xa); + let root_1 = root(1); + let root_2 = root(2); + let summary_1 = DAGStateSummaryV22 { + slot: Slot::new(1), + latest_block_root: root_1, + block_parent_root: root_2, + block_slot: Slot::new(1), + }; + + let dag = StateSummariesDAG::new_from_v22(vec![(root_a, summary_1)]).unwrap(); + + // The parent of the root summary is ZERO + assert_previous_state_root_is_zero(&dag, root_a); + } + + #[test] + fn new_from_v22_multiple_states() { + let dag = StateSummariesDAG::new_from_v22(vec![ + ( + root(0xa), + DAGStateSummaryV22 { + slot: Slot::new(3), + latest_block_root: root(3), + block_parent_root: root(1), + block_slot: Slot::new(3), + }, + ), + ( + root(0xb), + DAGStateSummaryV22 { + slot: Slot::new(4), + latest_block_root: root(4), + block_parent_root: root(3), + block_slot: Slot::new(4), + }, + ), + // fork 1 + ( + root(0xc), + DAGStateSummaryV22 { + slot: Slot::new(5), + latest_block_root: root(5), + block_parent_root: root(4), + block_slot: Slot::new(5), + }, + ), + // fork 2 + // skipped slot + ( + root(0xd), + DAGStateSummaryV22 { 
+ slot: Slot::new(5), + latest_block_root: root(4), + block_parent_root: root(3), + block_slot: Slot::new(4), + }, + ), + // normal slot + ( + root(0xe), + DAGStateSummaryV22 { + slot: Slot::new(6), + latest_block_root: root(6), + block_parent_root: root(4), + block_slot: Slot::new(6), + }, + ), + ]) + .unwrap(); + + // The parent of the root summary is ZERO + assert_previous_state_root_is_zero(&dag, root(0xa)); + assert_eq!(dag.previous_state_root(root(0xc)).unwrap(), root(0xb)); + assert_eq!(dag.previous_state_root(root(0xd)).unwrap(), root(0xb)); + assert_eq!(dag.previous_state_root(root(0xe)).unwrap(), root(0xd)); + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index beff95eb77..ca083f0572 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -38,8 +38,7 @@ use kzg::{Kzg, TrustedSetup}; use logging::create_test_tracing_subscriber; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; -use parking_lot::Mutex; -use parking_lot::RwLockWriteGuard; +use parking_lot::{Mutex, RwLockWriteGuard}; use rand::rngs::StdRng; use rand::Rng; use rand::SeedableRng; @@ -588,7 +587,8 @@ where .chain_config(chain_config) .import_all_data_columns(self.import_all_data_columns) .event_handler(Some(ServerSentEventHandler::new_with_capacity(5))) - .validator_monitor_config(validator_monitor_config); + .validator_monitor_config(validator_monitor_config) + .rng(Box::new(StdRng::seed_from_u64(42))); builder = if let Some(mutator) = self.initial_mutator { mutator(builder) @@ -893,6 +893,28 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } + pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool { + self.chain + .heads() + .iter() + .any(|(head, _)| *head == Hash256::from(*block_hash)) + } + + pub fn assert_knows_head(&self, head_block_root: Hash256) { + let heads = self.chain.heads(); + if 
!heads.iter().any(|(head, _)| *head == head_block_root) { + let fork_choice = self.chain.canonical_head.fork_choice_read_lock(); + if heads.is_empty() { + let nodes = &fork_choice.proto_array().core_proto_array().nodes; + panic!("Expected to know head block root {head_block_root:?}, but heads is empty. Nodes: {nodes:#?}"); + } else { + panic!( + "Expected to know head block root {head_block_root:?}, known heads {heads:#?}" + ); + } + } + } + pub async fn make_blinded_block( &self, state: BeaconState, @@ -2344,7 +2366,7 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); if !has_blobs { - return RpcBlock::new_without_blobs(Some(block_root), block); + return RpcBlock::new_without_blobs(Some(block_root), block, 0); } // Blobs are stored as data columns from Fulu (PeerDAS) @@ -2395,7 +2417,7 @@ where &self.spec, )? } else { - RpcBlock::new_without_blobs(Some(block_root), block) + RpcBlock::new_without_blobs(Some(block_root), block, 0) } } else { let blobs = blob_items @@ -3172,7 +3194,7 @@ pub fn generate_rand_block_and_blobs( NumBlobs::None => 0, }; let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); + execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); payload.execution_payload.transactions = <_>::default(); for tx in Vec::from(transactions) { @@ -3192,7 +3214,7 @@ pub fn generate_rand_block_and_blobs( NumBlobs::None => 0, }; let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); + execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); payload.execution_payload.transactions = <_>::default(); for tx in Vec::from(transactions) { payload.execution_payload.transactions.push(tx).unwrap(); @@ -3211,7 +3233,7 @@ pub fn generate_rand_block_and_blobs( NumBlobs::None => 0, }; let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); + execution_layer::test_utils::generate_blobs::(num_blobs, 
fork_name).unwrap(); payload.execution_payload.transactions = <_>::default(); for tx in Vec::from(transactions) { payload.execution_payload.transactions.push(tx).unwrap(); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 3dc46be16e..9225ffd9f4 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -147,7 +147,7 @@ fn build_rpc_block( RpcBlock::new_with_custody_columns(None, block, columns.clone(), columns.len(), spec) .unwrap() } - None => RpcBlock::new_without_blobs(None, block), + None => RpcBlock::new_without_blobs(None, block, 0), } } @@ -370,6 +370,7 @@ async fn chain_segment_non_linear_parent_roots() { blocks[3] = RpcBlock::new_without_blobs( None, Arc::new(SignedBeaconBlock::from_block(block, signature)), + harness.sampling_column_count, ); assert!( @@ -407,6 +408,7 @@ async fn chain_segment_non_linear_slots() { blocks[3] = RpcBlock::new_without_blobs( None, Arc::new(SignedBeaconBlock::from_block(block, signature)), + harness.sampling_column_count, ); assert!( @@ -434,6 +436,7 @@ async fn chain_segment_non_linear_slots() { blocks[3] = RpcBlock::new_without_blobs( None, Arc::new(SignedBeaconBlock::from_block(block, signature)), + harness.sampling_column_count, ); assert!( @@ -575,11 +578,16 @@ async fn invalid_signature_gossip_block() { .into_block_error() .expect("should import all blocks prior to the one being tested"); let signed_block = SignedBeaconBlock::from_block(block, junk_signature()); + let rpc_block = RpcBlock::new_without_blobs( + None, + Arc::new(signed_block), + harness.sampling_column_count, + ); let process_res = harness .chain .process_block( - signed_block.canonical_root(), - Arc::new(signed_block), + rpc_block.block_root(), + rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1541,12 +1549,13 @@ async fn add_base_block_to_altair_chain() { )); // Ensure that it 
would be impossible to import via `BeaconChain::process_block`. + let base_rpc_block = RpcBlock::new_without_blobs(None, Arc::new(base_block.clone()), 0); assert!(matches!( harness .chain .process_block( - base_block.canonical_root(), - Arc::new(base_block.clone()), + base_rpc_block.block_root(), + base_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1564,7 +1573,7 @@ async fn add_base_block_to_altair_chain() { harness .chain .process_chain_segment( - vec![RpcBlock::new_without_blobs(None, Arc::new(base_block))], + vec![RpcBlock::new_without_blobs(None, Arc::new(base_block), 0)], NotifyExecutionLayer::Yes, ) .await, @@ -1677,12 +1686,13 @@ async fn add_altair_block_to_base_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. + let altair_rpc_block = RpcBlock::new_without_blobs(None, Arc::new(altair_block.clone()), 0); assert!(matches!( harness .chain .process_block( - altair_block.canonical_root(), - Arc::new(altair_block.clone()), + altair_rpc_block.block_root(), + altair_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1700,7 +1710,7 @@ async fn add_altair_block_to_base_chain() { harness .chain .process_chain_segment( - vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block))], + vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block), 0)], NotifyExecutionLayer::Yes ) .await, @@ -1761,11 +1771,16 @@ async fn import_duplicate_block_unrealized_justification() { // Create two verified variants of the block, representing the same block being processed in // parallel. 
let notify_execution_layer = NotifyExecutionLayer::Yes; - let verified_block1 = block + let rpc_block = RpcBlock::new_without_blobs( + Some(block_root), + block.clone(), + harness.sampling_column_count, + ); + let verified_block1 = rpc_block .clone() .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); - let verified_block2 = block + let verified_block2 = rpc_block .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 88180f3c94..c6fc3416e0 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,5 +1,6 @@ #![cfg(not(debug_assertions))] +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::{ canonical_head::{CachedHead, CanonicalHead}, test_utils::{BeaconChainHarness, EphemeralHarnessType}, @@ -687,12 +688,14 @@ async fn invalidates_all_descendants() { assert_eq!(fork_parent_state.slot(), fork_parent_slot); let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; + let fork_rpc_block = + RpcBlock::new_without_blobs(None, fork_block.clone(), rig.harness.sampling_column_count); let fork_block_root = rig .harness .chain .process_block( - fork_block.canonical_root(), - fork_block, + fork_rpc_block.block_root(), + fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -788,12 +791,14 @@ async fn switches_heads() { let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); + let fork_rpc_block = + RpcBlock::new_without_blobs(None, fork_block.clone(), rig.harness.sampling_column_count); let fork_block_root = rig .harness .chain .process_block( - fork_block.canonical_root(), - fork_block, + fork_rpc_block.block_root(), + 
fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1057,8 +1062,10 @@ async fn invalid_parent() { )); // Ensure the block built atop an invalid payload is invalid for import. + let rpc_block = + RpcBlock::new_without_blobs(None, block.clone(), rig.harness.sampling_column_count); assert!(matches!( - rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, + rig.harness.chain.process_block(rpc_block.block_root(), rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) @@ -1282,7 +1289,7 @@ impl InvalidHeadSetup { /// /// 1. A chain where the only viable head block has an invalid execution payload. /// 2. A block (`fork_block`) which will become the head of the chain when - /// it is imported. + /// it is imported. async fn new() -> InvalidHeadSetup { let slots_per_epoch = E::slots_per_epoch(); let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -1380,11 +1387,13 @@ async fn recover_from_invalid_head_by_importing_blocks() { } = InvalidHeadSetup::new().await; // Import the fork block, it should become the head. + let fork_rpc_block = + RpcBlock::new_without_blobs(None, fork_block.clone(), rig.harness.sampling_column_count); rig.harness .chain .process_block( - fork_block.canonical_root(), - fork_block.clone(), + fork_rpc_block.block_root(), + fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1419,8 +1428,8 @@ async fn recover_from_invalid_head_after_persist_and_reboot() { let slot_clock = rig.harness.chain.slot_clock.clone(); - // Forcefully persist the head and fork choice. - rig.harness.chain.persist_head_and_fork_choice().unwrap(); + // Forcefully persist fork choice. 
+ rig.harness.chain.persist_fork_choice().unwrap(); let resumed = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 38ff87d0c8..3343dc101b 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1,6 +1,7 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_verification::Error as AttnError; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; @@ -16,6 +17,7 @@ use beacon_chain::{ }; use logging::create_test_tracing_subscriber; use maplit::hashset; +use rand::rngs::StdRng; use rand::Rng; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::{state_advance::complete_state_advance, BlockReplayer}; @@ -31,7 +33,6 @@ use store::{ BlobInfo, DBColumn, HotColdDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; -use tokio::time::sleep; use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; @@ -120,6 +121,17 @@ fn get_harness_generic( harness } +fn count_states_descendant_of_block( + store: &HotColdDB, BeaconNodeBackend>, + block_root: Hash256, +) -> usize { + let summaries = store.load_hot_state_summaries().unwrap(); + summaries + .iter() + .filter(|(_, s)| s.latest_block_root == block_root) + .count() +} + #[tokio::test] async fn light_client_bootstrap_test() { let spec = test_spec::(); @@ -1225,7 +1237,7 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { assert_eq!(rig.get_finalized_checkpoints(), hashset! 
{}); - assert!(rig.chain.knows_head(&stray_head)); + rig.assert_knows_head(stray_head.into()); // Trigger finalization let finalization_slots: Vec = ((canonical_chain_slot + 1) @@ -1273,7 +1285,7 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() { ); } - assert!(!rig.chain.knows_head(&stray_head)); + assert!(!rig.knows_head(&stray_head)); } #[tokio::test] @@ -1399,7 +1411,7 @@ async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() { ); } - assert!(!rig.chain.knows_head(&stray_head)); + assert!(!rig.knows_head(&stray_head)); let chain_dump = rig.chain.chain_dump().unwrap(); assert!(get_blocks(&chain_dump).contains(&shared_head)); } @@ -1492,7 +1504,7 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() { ); } - assert!(rig.chain.knows_head(&stray_head)); + rig.assert_knows_head(stray_head.into()); } #[tokio::test] @@ -1576,7 +1588,7 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { // Precondition: Nothing is finalized yet assert_eq!(rig.get_finalized_checkpoints(), hashset! {},); - assert!(rig.chain.knows_head(&stray_head)); + rig.assert_knows_head(stray_head.into()); // Trigger finalization let canonical_slots: Vec = (rig.epoch_start_slot(2)..=rig.epoch_start_slot(6)) @@ -1631,7 +1643,7 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { ); } - assert!(!rig.chain.knows_head(&stray_head)); + assert!(!rig.knows_head(&stray_head)); } // This is to check if state outside of normal block processing are pruned correctly. @@ -2150,64 +2162,6 @@ async fn pruning_test( check_no_blocks_exist(&harness, stray_blocks.values()); } -#[tokio::test] -async fn garbage_collect_temp_states_from_failed_block_on_startup() { - let db_path = tempdir().unwrap(); - - // Wrap these functions to ensure the variables are dropped before we try to open another - // instance of the store. 
- let mut store = { - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - - let slots_per_epoch = E::slots_per_epoch(); - - let genesis_state = harness.get_current_state(); - let block_slot = Slot::new(2 * slots_per_epoch); - let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; - - let (mut block, _) = (*signed_block).clone().deconstruct(); - - // Mutate the block to make it invalid, and re-sign it. - *block.state_root_mut() = Hash256::repeat_byte(0xff); - let proposer_index = block.proposer_index() as usize; - let block = Arc::new(block.sign( - &harness.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), - &harness.spec, - )); - - // The block should be rejected, but should store a bunch of temporary states. - harness.set_current_slot(block_slot); - harness - .process_block_result((block, None)) - .await - .unwrap_err(); - - assert_eq!( - store.iter_temporary_state_roots().count(), - block_slot.as_usize() - 1 - ); - store - }; - - // Wait until all the references to the store have been dropped, this helps ensure we can - // re-open the store later. - loop { - store = if let Err(store_arc) = Arc::try_unwrap(store) { - sleep(Duration::from_millis(500)).await; - store_arc - } else { - break; - } - } - - // On startup, the store should garbage collect all the temporary states. - let store = get_store(&db_path); - assert_eq!(store.iter_temporary_state_roots().count(), 0); -} - #[tokio::test] async fn garbage_collect_temp_states_from_failed_block_on_finalization() { let db_path = tempdir().unwrap(); @@ -2222,6 +2176,7 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; let (mut block, _) = (*signed_block).clone().deconstruct(); + let bad_block_parent_root = block.parent_root(); // Mutate the block to make it invalid, and re-sign it. 
*block.state_root_mut() = Hash256::repeat_byte(0xff); @@ -2240,9 +2195,11 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { .await .unwrap_err(); + // The bad block parent root is the genesis block root. There's `block_slot - 1` temporary + // states to remove + the genesis state = block_slot. assert_eq!( - store.iter_temporary_state_roots().count(), - block_slot.as_usize() - 1 + count_states_descendant_of_block(&store, bad_block_parent_root), + block_slot.as_usize(), ); // Finalize the chain without the block, which should result in pruning of all temporary states. @@ -2259,8 +2216,12 @@ async fn garbage_collect_temp_states_from_failed_block_on_finalization() { // Check that the finalization migration ran. assert_ne!(store.get_split_slot(), 0); - // Check that temporary states have been pruned. - assert_eq!(store.iter_temporary_state_roots().count(), 0); + // Check that temporary states have been pruned. The genesis block is not a descendant of the + // latest finalized checkpoint, so all its states have been pruned from the hot DB, = 0. + assert_eq!( + count_states_descendant_of_block(&store, bad_block_parent_root), + 0 + ); } #[tokio::test] @@ -2414,6 +2375,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .chain_config(ChainConfig::default()) .event_handler(Some(ServerSentEventHandler::new_with_capacity(1))) .execution_layer(Some(mock.el)) + .rng(Box::new(StdRng::seed_from_u64(42))) .build() .expect("should build"); @@ -2682,12 +2644,17 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert_eq!(split.block_root, valid_fork_block.parent_root()); assert_ne!(split.state_root, unadvanced_split_state_root); + let invalid_fork_rpc_block = RpcBlock::new_without_blobs( + None, + invalid_fork_block.clone(), + harness.sampling_column_count, + ); // Applying the invalid block should fail. 
let err = harness .chain .process_block( - invalid_fork_block.canonical_root(), - invalid_fork_block.clone(), + invalid_fork_rpc_block.block_root(), + invalid_fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -2697,11 +2664,16 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. })); // Applying the valid block should succeed, but it should not become head. + let valid_fork_rpc_block = RpcBlock::new_without_blobs( + None, + valid_fork_block.clone(), + harness.sampling_column_count, + ); harness .chain .process_block( - valid_fork_block.canonical_root(), - valid_fork_block.clone(), + valid_fork_rpc_block.block_root(), + valid_fork_rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -2785,8 +2757,8 @@ async fn finalizes_after_resuming_from_db() { harness .chain - .persist_head_and_fork_choice() - .expect("should persist the head and fork choice"); + .persist_fork_choice() + .expect("should persist fork choice"); harness .chain .persist_op_pool() @@ -2999,11 +2971,13 @@ async fn revert_minority_fork_on_resume() { resumed_harness.chain.recompute_head_at_current_slot().await; assert_eq!(resumed_harness.head_slot(), fork_slot - 1); - // Head track should know the canonical head and the rogue head. - assert_eq!(resumed_harness.chain.heads().len(), 2); - assert!(resumed_harness - .chain - .knows_head(&resumed_harness.head_block_root().into())); + // Fork choice should only know the canonical head. When we reverted the head we also should + // have called `reset_fork_choice_to_finalization` which rebuilds fork choice from scratch + // without the reverted block. + assert_eq!( + resumed_harness.chain.heads(), + vec![(resumed_harness.head_block_root(), fork_slot - 1)] + ); // Apply blocks from the majority chain and trigger finalization. 
let initial_split_slot = resumed_harness.chain.store.get_split_slot(); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index e11fc23072..195c53c4a0 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -31,6 +31,7 @@ logging = { workspace = true } metrics = { workspace = true } monitoring_api = { workspace = true } network = { workspace = true } +rand = { workspace = true } sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index c8ff6521c8..3cb7b33aae 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -33,6 +33,8 @@ use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; +use rand::rngs::{OsRng, StdRng}; +use rand::SeedableRng; use slasher::Slasher; use slasher_service::SlasherService; use std::net::TcpListener; @@ -210,7 +212,10 @@ where .event_handler(event_handler) .execution_layer(execution_layer) .import_all_data_columns(config.network.subscribe_all_data_column_subnets) - .validator_monitor_config(config.validator_monitor.clone()); + .validator_monitor_config(config.validator_monitor.clone()) + .rng(Box::new( + StdRng::from_rng(OsRng).map_err(|e| format!("Failed to create RNG: {:?}", e))?, + )); let builder = if let Some(slasher) = self.slasher.clone() { builder.slasher(slasher) diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index aed6cdba67..4bfee223ff 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,10 +1,11 @@ use crate::engines::ForkchoiceState; use crate::http::{ 
ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, - ENGINE_GET_BLOBS_V1, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, - ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, - ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, + ENGINE_GET_BLOBS_V1, ENGINE_GET_BLOBS_V2, ENGINE_GET_CLIENT_VERSION_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, + ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, + ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, + ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -553,6 +554,7 @@ pub struct EngineCapabilities { pub get_payload_v5: bool, pub get_client_version_v1: bool, pub get_blobs_v1: bool, + pub get_blobs_v2: bool, } impl EngineCapabilities { @@ -609,6 +611,9 @@ impl EngineCapabilities { if self.get_blobs_v1 { response.push(ENGINE_GET_BLOBS_V1); } + if self.get_blobs_v2 { + response.push(ENGINE_GET_BLOBS_V2); + } response } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 747383754a..bf4c391a8d 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -61,6 +61,7 @@ pub const ENGINE_GET_CLIENT_VERSION_V1: &str = "engine_getClientVersionV1"; pub const ENGINE_GET_CLIENT_VERSION_TIMEOUT: Duration = Duration::from_secs(1); pub const ENGINE_GET_BLOBS_V1: &str = "engine_getBlobsV1"; +pub const ENGINE_GET_BLOBS_V2: &str = "engine_getBlobsV2"; pub const ENGINE_GET_BLOBS_TIMEOUT: Duration = Duration::from_secs(1); /// This error is returned during a 
`chainId` call by Geth. @@ -87,6 +88,7 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_BLOBS_V1, + ENGINE_GET_BLOBS_V2, ]; /// We opt to initialize the JsonClientVersionV1 rather than the ClientVersionV1 @@ -708,7 +710,7 @@ impl HttpJsonRpc { } } - pub async fn get_blobs( + pub async fn get_blobs_v1( &self, versioned_hashes: Vec, ) -> Result>>, Error> { @@ -722,6 +724,20 @@ impl HttpJsonRpc { .await } + pub async fn get_blobs_v2( + &self, + versioned_hashes: Vec, + ) -> Result>>, Error> { + let params = json!([versioned_hashes]); + + self.rpc_request( + ENGINE_GET_BLOBS_V2, + params, + ENGINE_GET_BLOBS_TIMEOUT * self.execution_timeout_multiplier, + ) + .await + } + pub async fn get_block_by_number( &self, query: BlockByNumberQuery<'_>, @@ -963,19 +979,6 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } - // TODO(fulu): remove when v5 method is ready. - ForkName::Fulu => { - let response: JsonGetPayloadResponseV5 = self - .rpc_request( - ENGINE_GET_PAYLOAD_V4, - params, - ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, - ) - .await?; - JsonGetPayloadResponse::V5(response) - .try_into() - .map_err(Error::BadResponse) - } _ => Err(Error::UnsupportedForkVariant(format!( "called get_payload_v4 with {}", fork_name @@ -1148,6 +1151,7 @@ impl HttpJsonRpc { get_payload_v5: capabilities.contains(ENGINE_GET_PAYLOAD_V5), get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), get_blobs_v1: capabilities.contains(ENGINE_GET_BLOBS_V1), + get_blobs_v2: capabilities.contains(ENGINE_GET_BLOBS_V2), }) } @@ -1320,9 +1324,8 @@ impl HttpJsonRpc { } } ForkName::Fulu => { - // TODO(fulu): switch to v5 when the EL is ready - if engine_capabilities.get_payload_v4 { - self.get_payload_v4(fork_name, payload_id).await + if engine_capabilities.get_payload_v5 { + self.get_payload_v5(fork_name, payload_id).await } else { 
Err(Error::RequiredMethodUnsupported("engine_getPayloadv5")) } diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 96615297d8..30d30481ea 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -717,12 +717,23 @@ impl From> for BlobsBundle { } } +#[superstruct( + variants(V1, V2), + variant_attributes( + derive(Debug, Clone, PartialEq, Serialize, Deserialize), + serde(bound = "E: EthSpec", rename_all = "camelCase") + ) +)] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(bound = "E: EthSpec", rename_all = "camelCase")] -pub struct BlobAndProofV1 { +pub struct BlobAndProof { #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub blob: Blob, + /// KZG proof for the blob (Deneb) + #[superstruct(only(V1))] pub proof: KzgProof, + /// KZG cell proofs for the extended blob (PeerDAS) + #[superstruct(only(V2))] + pub proofs: KzgProofs, } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 6644e46a0d..bbdf1a054b 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,7 +4,7 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. 
-use crate::json_structures::BlobAndProofV1; +use crate::json_structures::{BlobAndProofV1, BlobAndProofV2}; use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; use auth::{strip_prefix, Auth, JwtKey}; @@ -16,8 +16,8 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkchoiceState}; -use eth2::types::FullPayloadContents; -use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedResponse}; +use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse}; +use eth2::types::{BlobsBundle, FullPayloadContents}; use ethers_core::types::Transaction as EthersTransaction; use fixed_bytes::UintExtended; use fork_choice::ForkchoiceUpdateParameters; @@ -210,6 +210,7 @@ pub enum BlockProposalContents> { /// `None` for blinded `PayloadAndBlobs`. blobs_and_proofs: Option<(BlobsList, KzgProofs)>, // TODO(electra): this should probably be a separate variant/superstruct + // See: https://github.com/sigp/lighthouse/issues/6981 requests: Option>, }, } @@ -596,13 +597,7 @@ impl ExecutionLayer { let (payload_ref, maybe_json_blobs_bundle) = payload_and_blobs; let payload = payload_ref.clone_from_ref(); - let maybe_blobs_bundle = maybe_json_blobs_bundle - .cloned() - .map(|blobs_bundle| BlobsBundle { - commitments: blobs_bundle.commitments, - proofs: blobs_bundle.proofs, - blobs: blobs_bundle.blobs, - }); + let maybe_blobs_bundle = maybe_json_blobs_bundle.cloned(); self.inner .payload_cache @@ -1682,7 +1677,7 @@ impl ExecutionLayer { /// /// - `Some(true)` if the given `block_hash` is the terminal proof-of-work block. /// - `Some(false)` if the given `block_hash` is certainly *not* the terminal proof-of-work - /// block. + /// block. /// - `None` if the `block_hash` or its parent were not present on the execution engine. /// - `Err(_)` if there was an error connecting to the execution engine. 
/// @@ -1846,7 +1841,7 @@ impl ExecutionLayer { } } - pub async fn get_blobs( + pub async fn get_blobs_v1( &self, query: Vec, ) -> Result>>, Error> { @@ -1854,7 +1849,24 @@ impl ExecutionLayer { if capabilities.get_blobs_v1 { self.engine() - .request(|engine| async move { engine.api.get_blobs(query).await }) + .request(|engine| async move { engine.api.get_blobs_v1(query).await }) + .await + .map_err(Box::new) + .map_err(Error::EngineError) + } else { + Err(Error::GetBlobsNotSupported) + } + } + + pub async fn get_blobs_v2( + &self, + query: Vec, + ) -> Result>>, Error> { + let capabilities = self.get_engine_capabilities(None).await?; + + if capabilities.get_blobs_v2 { + self.engine() + .request(|engine| async move { engine.api.get_blobs_v2(query).await }) .await .map_err(Box::new) .map_err(Error::EngineError) diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 81fb9bd7b8..b057abe887 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -20,13 +20,14 @@ use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, - ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, Transaction, Transactions, - Uint256, + ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, KzgProofs, Transaction, + Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); +const TEST_BLOB_BUNDLE_V2: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle_v2.ssz"); pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; const GAS_USED: u64 = DEFAULT_GAS_LIMIT - 1; @@ -697,15 +698,13 @@ impl 
ExecutionBlockGenerator { }, }; - if execution_payload.fork_name().deneb_enabled() { + let fork_name = execution_payload.fork_name(); + if fork_name.deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); - let max_blobs = self - .spec - .max_blobs_per_block_by_fork(execution_payload.fork_name()) - as usize; + let max_blobs = self.spec.max_blobs_per_block_by_fork(fork_name) as usize; let num_blobs = rng.gen::() % (max_blobs + 1); - let (bundle, transactions) = generate_blobs(num_blobs)?; + let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { execution_payload .transactions_mut() @@ -721,7 +720,8 @@ impl ExecutionBlockGenerator { } } -pub fn load_test_blobs_bundle() -> Result<(KzgCommitment, KzgProof, Blob), String> { +pub fn load_test_blobs_bundle_v1() -> Result<(KzgCommitment, KzgProof, Blob), String> +{ let BlobsBundle:: { commitments, proofs, @@ -745,32 +745,56 @@ pub fn load_test_blobs_bundle() -> Result<(KzgCommitment, KzgProof, )) } +pub fn load_test_blobs_bundle_v2( +) -> Result<(KzgCommitment, KzgProofs, Blob), String> { + let BlobsBundle:: { + commitments, + proofs, + blobs, + } = BlobsBundle::from_ssz_bytes(TEST_BLOB_BUNDLE_V2) + .map_err(|e| format!("Unable to decode ssz: {:?}", e))?; + + Ok(( + commitments + .first() + .cloned() + .ok_or("commitment missing in test bundle")?, + // there's only one blob in the test bundle, hence we take all the cell proofs here. 
+ proofs, + blobs + .first() + .cloned() + .ok_or("blob missing in test bundle")?, + )) +} + pub fn generate_blobs( n_blobs: usize, + fork_name: ForkName, ) -> Result<(BlobsBundle, Transactions), String> { - let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + let tx = static_valid_tx::() + .map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?; + let transactions = vec![tx; n_blobs]; - let mut bundle = BlobsBundle::::default(); - let mut transactions = vec![]; - - for blob_index in 0..n_blobs { - let tx = static_valid_tx::() - .map_err(|e| format!("error creating valid tx SSZ bytes: {:?}", e))?; - - transactions.push(tx); - bundle - .blobs - .push(blob.clone()) - .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; - bundle - .commitments - .push(kzg_commitment) - .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; - bundle - .proofs - .push(kzg_proof) - .map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?; - } + let bundle = if fork_name.fulu_enabled() { + let (kzg_commitment, kzg_proofs, blob) = load_test_blobs_bundle_v2::()?; + BlobsBundle { + commitments: vec![kzg_commitment; n_blobs].into(), + proofs: vec![kzg_proofs.to_vec(); n_blobs] + .into_iter() + .flatten() + .collect::>() + .into(), + blobs: vec![blob; n_blobs].into(), + } + } else { + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle_v1::()?; + BlobsBundle { + commitments: vec![kzg_commitment; n_blobs].into(), + proofs: vec![kzg_proof; n_blobs].into(), + blobs: vec![blob; n_blobs].into(), + } + }; Ok((bundle, transactions.into())) } @@ -905,7 +929,7 @@ pub fn generate_pow_block( #[cfg(test)] mod test { use super::*; - use kzg::{trusted_setup::get_trusted_setup, TrustedSetup}; + use kzg::{trusted_setup::get_trusted_setup, Bytes48, CellRef, KzgBlobRef, TrustedSetup}; use types::{MainnetEthSpec, MinimalEthSpec}; #[test] @@ -974,20 +998,28 @@ mod test { } #[test] - fn valid_test_blobs() { + fn 
valid_test_blobs_bundle_v1() { assert!( - validate_blob::().is_ok(), + validate_blob_bundle_v1::().is_ok(), "Mainnet preset test blobs bundle should contain valid proofs" ); assert!( - validate_blob::().is_ok(), + validate_blob_bundle_v1::().is_ok(), "Minimal preset test blobs bundle should contain valid proofs" ); } - fn validate_blob() -> Result<(), String> { + #[test] + fn valid_test_blobs_bundle_v2() { + validate_blob_bundle_v2::() + .expect("Mainnet preset test blobs bundle v2 should contain valid proofs"); + validate_blob_bundle_v2::() + .expect("Minimal preset test blobs bundle v2 should contain valid proofs"); + } + + fn validate_blob_bundle_v1() -> Result<(), String> { let kzg = load_kzg()?; - let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; + let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle_v1::()?; let kzg_blob = kzg::Blob::from_bytes(blob.as_ref()) .map(Box::new) .map_err(|e| format!("Error converting blob to kzg blob: {e:?}"))?; @@ -995,6 +1027,26 @@ mod test { .map_err(|e| format!("Invalid blobs bundle: {e:?}")) } + fn validate_blob_bundle_v2() -> Result<(), String> { + let kzg = load_kzg()?; + let (kzg_commitments, kzg_proofs, cells) = + load_test_blobs_bundle_v2::().map(|(commitment, proofs, blob)| { + let kzg_blob: KzgBlobRef = blob.as_ref().try_into().unwrap(); + ( + vec![Bytes48::from(commitment); proofs.len()], + proofs.into_iter().map(|p| p.into()).collect::>(), + kzg.compute_cells(kzg_blob).unwrap(), + ) + })?; + let (cell_indices, cell_refs): (Vec, Vec) = cells + .iter() + .enumerate() + .map(|(cell_idx, cell)| (cell_idx as u64, CellRef::try_from(cell.as_ref()).unwrap())) + .unzip(); + kzg.verify_cell_proof_batch(&cell_refs, &kzg_proofs, cell_indices, &kzg_commitments) + .map_err(|e| format!("Invalid blobs bundle: {e:?}")) + } + fn load_kzg() -> Result { let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) diff --git 
a/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle_v2.ssz b/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle_v2.ssz new file mode 100644 index 0000000000..e57096c076 Binary files /dev/null and b/beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle_v2.ssz differ diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index d727d2c159..70c21afed4 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -383,9 +383,8 @@ pub async fn handle_rpc( == ForkName::Fulu && (method == ENGINE_GET_PAYLOAD_V1 || method == ENGINE_GET_PAYLOAD_V2 - || method == ENGINE_GET_PAYLOAD_V3) - // TODO(fulu): Uncomment this once v5 method is ready for Fulu - // || method == ENGINE_GET_PAYLOAD_V4) + || method == ENGINE_GET_PAYLOAD_V3 + || method == ENGINE_GET_PAYLOAD_V4) { return Err(( format!("{} called after Fulu fork!", method), @@ -451,22 +450,6 @@ pub async fn handle_rpc( }) .unwrap() } - // TODO(fulu): remove this once we switch to v5 method - JsonExecutionPayload::V5(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseV5 { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V5 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? 
- .into(), - should_override_builder: false, - execution_requests: Default::default(), - }) - .unwrap() - } _ => unreachable!(), }), ENGINE_GET_PAYLOAD_V5 => Ok(match JsonExecutionPayload::from(response) { diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index fba34121a7..87ea8642be 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -546,7 +546,7 @@ impl MockBuilder { .map_err(|_| "incorrect payload variant".to_string())? .into(), blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) + .map(|b| b.commitments.clone()) .unwrap_or_default(), value: self.get_bid_value(value), pubkey: self.builder_sk.public_key().compress(), @@ -558,7 +558,7 @@ impl MockBuilder { .map_err(|_| "incorrect payload variant".to_string())? .into(), blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) + .map(|b| b.commitments.clone()) .unwrap_or_default(), value: self.get_bid_value(value), pubkey: self.builder_sk.public_key().compress(), @@ -570,7 +570,7 @@ impl MockBuilder { .map_err(|_| "incorrect payload variant".to_string())? 
.into(), blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments) + .map(|b| b.commitments.clone()) .unwrap_or_default(), value: self.get_bid_value(value), pubkey: self.builder_sk.public_key().compress(), diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 17441a15fb..245aa71a15 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -58,6 +58,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_v5: true, get_client_version_v1: true, get_blobs_v1: true, + get_blobs_v2: true, }; pub static DEFAULT_CLIENT_VERSION: LazyLock = diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index b13517f27e..afc68ad96d 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -28,6 +28,7 @@ metrics = { workspace = true } network = { workspace = true } operation_pool = { workspace = true } parking_lot = { workspace = true } +proto_array = { workspace = true } rand = { workspace = true } safe_arith = { workspace = true } sensitive_url = { workspace = true } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index eb188a9b19..412b756684 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -68,6 +68,7 @@ use serde_json::Value; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; +use std::collections::HashSet; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; @@ -86,13 +87,14 @@ use tokio_stream::{ StreamExt, }; use tracing::{debug, error, info, warn}; +use types::AttestationData; use types::{ - fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId, - AttesterSlashing, BeaconStateError, ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, - Epoch, EthSpec, ForkName, 
ForkVersionedResponse, Hash256, ProposerPreparationData, - ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, - SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, + fork_versioned_response::EmptyMetadata, Attestation, AttestationShufflingId, AttesterSlashing, + BeaconStateError, ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, + ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, + RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -1145,6 +1147,39 @@ pub fn serve( }, ); + // GET beacon/states/{state_id}/pending_consolidations + let get_beacon_state_pending_consolidations = beacon_states_path + .clone() + .and(warp::path("pending_consolidations")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(consolidations) = state.pending_consolidations() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending consolidations not found".to_string(), + )); + }; + + Ok((consolidations.clone(), execution_optimistic, finalized)) + }, + )?; + + Ok(api_types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ); + // GET beacon/headers // // Note: this endpoint only returns information about blocks in the canonical chain. 
Given that @@ -1927,11 +1962,11 @@ pub fn serve( chain: Arc>, query: api_types::AttestationPoolQuery| { task_spawner.blocking_response_task(Priority::P1, move || { - let query_filter = |data: &AttestationData| { + let query_filter = |data: &AttestationData, committee_indices: HashSet| { query.slot.is_none_or(|slot| slot == data.slot) && query .committee_index - .is_none_or(|index| index == data.index) + .is_none_or(|index| committee_indices.contains(&index)) }; let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); @@ -1940,7 +1975,9 @@ pub fn serve( .naive_aggregation_pool .read() .iter() - .filter(|&att| query_filter(att.data())) + .filter(|&att| { + query_filter(att.data(), att.get_committee_indices_map()) + }) .cloned(), ); // Use the current slot to find the fork version, and convert all messages to the @@ -4737,6 +4774,7 @@ pub fn serve( .uor(get_beacon_state_randao) .uor(get_beacon_state_pending_deposits) .uor(get_beacon_state_pending_partial_withdrawals) + .uor(get_beacon_state_pending_consolidations) .uor(get_beacon_headers) .uor(get_beacon_headers_block_id) .uor(get_beacon_block) diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index ac8c08581c..2d0a5d09a1 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -4,7 +4,7 @@ use crate::version::{ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{ self as api_types, ChainSpec, ForkVersionedResponse, LightClientUpdate, - LightClientUpdateResponseChunk, LightClientUpdateSszResponse, LightClientUpdatesQuery, + LightClientUpdateResponseChunk, LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, }; use ssz::Encode; use std::sync::Arc; @@ -37,15 +37,9 @@ pub fn get_light_client_updates( .map(|update| map_light_client_update_to_ssz_chunk::(&chain, update)) .collect::>(); - let ssz_response = LightClientUpdateSszResponse { - response_chunk_len: 
(light_client_updates.len() as u64).to_le_bytes().to_vec(), - response_chunk: response_chunks.as_ssz_bytes(), - } - .as_ssz_bytes(); - Response::builder() .status(200) - .body(ssz_response) + .body(response_chunks.as_ssz_bytes()) .map(|res: Response>| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -159,16 +153,24 @@ fn map_light_client_update_to_ssz_chunk( ) -> LightClientUpdateResponseChunk { let fork_name = chain .spec - .fork_name_at_slot::(*light_client_update.signature_slot()); + .fork_name_at_slot::(light_client_update.attested_header_slot()); let fork_digest = ChainSpec::compute_fork_digest( chain.spec.fork_version_for_name(fork_name), chain.genesis_validators_root, ); - LightClientUpdateResponseChunk { + let payload = light_client_update.as_ssz_bytes(); + let response_chunk_len = fork_digest.len() + payload.len(); + + let response_chunk = LightClientUpdateResponseChunkInner { context: fork_digest, - payload: light_client_update.as_ssz_bytes(), + payload, + }; + + LightClientUpdateResponseChunk { + response_chunk_len: response_chunk_len as u64, + response_chunk, } } diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index a5cd94536d..b613cf8467 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -2,7 +2,7 @@ use crate::metrics; use std::future::Future; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; -use beacon_chain::block_verification_types::AsBlock; +use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ @@ -302,7 +302,11 @@ pub async fn publish_block>( ); let import_result = Box::pin(chain.process_block( block_root, - block.clone(), + RpcBlock::new_without_blobs( + 
Some(block_root), + block.clone(), + network_globals.custody_columns_count() as usize, + ), NotifyExecutionLayer::Yes, BlockImportSource::HttpApi, publish_fn, @@ -364,7 +368,7 @@ fn spawn_build_data_sidecar_task( } else { // Post PeerDAS: construct data columns. let gossip_verified_data_columns = - build_gossip_verified_data_columns(&chain, &block, blobs)?; + build_gossip_verified_data_columns(&chain, &block, blobs, kzg_proofs)?; Ok((vec![], gossip_verified_data_columns)) } }, @@ -383,10 +387,11 @@ fn build_gossip_verified_data_columns( chain: &BeaconChain, block: &SignedBeaconBlock>, blobs: BlobsList, + kzg_cell_proofs: KzgProofs, ) -> Result>>, Rejection> { let slot = block.slot(); let data_column_sidecars = - build_blob_data_column_sidecars(chain, block, blobs).map_err(|e| { + build_blob_data_column_sidecars(chain, block, blobs, kzg_cell_proofs).map_err(|e| { error!( error = ?e, %slot, @@ -520,7 +525,7 @@ fn publish_column_sidecars( .len() .saturating_sub(malicious_withhold_count); // Randomize columns before dropping the last malicious_withhold_count items - data_column_sidecars.shuffle(&mut rand::thread_rng()); + data_column_sidecars.shuffle(&mut **chain.rng.lock()); data_column_sidecars.truncate(columns_to_keep); } let pubsub_messages = data_column_sidecars diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 6d407d2742..31fecfeb99 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -27,6 +27,7 @@ use http_api::{ }; use lighthouse_network::{types::SyncState, Enr, EnrExt, PeerId}; use network::NetworkReceivers; +use operation_pool::attestation_storage::CheckpointKey; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; @@ -1241,6 +1242,33 @@ impl ApiTester { self } + pub async fn test_beacon_states_pending_consolidations(self) -> Self { + for state_id in self.interesting_state_ids() { + let mut state_opt = state_id + .state(&self.chain) + 
.ok() + .map(|(state, _execution_optimistic, _finalized)| state); + + let result = self + .client + .get_beacon_states_pending_consolidations(state_id.0) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_mut().expect("result should be none"); + let expected = state.pending_consolidations().unwrap(); + + assert_eq!(result.unwrap(), expected.to_vec()); + } + + self + } + pub async fn test_beacon_headers_all_slots(self) -> Self { for slot in 0..CHAIN_LENGTH { let slot = Slot::from(slot); @@ -2087,7 +2115,7 @@ impl ApiTester { self } - pub async fn test_get_beacon_pool_attestations(self) -> Self { + pub async fn test_get_beacon_pool_attestations(self) { let result = self .client .get_beacon_pool_attestations_v1(None, None) @@ -2106,9 +2134,80 @@ impl ApiTester { .await .unwrap() .data; + assert_eq!(result, expected); - self + let result_committee_index_filtered = self + .client + .get_beacon_pool_attestations_v1(None, Some(0)) + .await + .unwrap() + .data; + + let expected_committee_index_filtered = expected + .clone() + .into_iter() + .filter(|att| att.get_committee_indices_map().contains(&0)) + .collect::>(); + + assert_eq!( + result_committee_index_filtered, + expected_committee_index_filtered + ); + + let result_committee_index_filtered = self + .client + .get_beacon_pool_attestations_v1(None, Some(1)) + .await + .unwrap() + .data; + + let expected_committee_index_filtered = expected + .clone() + .into_iter() + .filter(|att| att.get_committee_indices_map().contains(&1)) + .collect::>(); + + assert_eq!( + result_committee_index_filtered, + expected_committee_index_filtered + ); + + let fork_name = self + .harness + .chain + .spec + .fork_name_at_slot::(self.harness.chain.slot().unwrap()); + + // aggregate electra attestations + if fork_name.electra_enabled() { + // Take and drop the lock in a block to avoid clippy complaining + // about taking locks across await points + { + let 
mut all_attestations = self.chain.op_pool.attestations.write(); + let (prev_epoch_key, curr_epoch_key) = + CheckpointKey::keys_for_state(&self.harness.get_current_state()); + all_attestations.aggregate_across_committees(prev_epoch_key); + all_attestations.aggregate_across_committees(curr_epoch_key); + } + let result_committee_index_filtered = self + .client + .get_beacon_pool_attestations_v2(None, Some(0)) + .await + .unwrap() + .data; + let mut expected = self.chain.op_pool.get_all_attestations(); + expected.extend(self.chain.naive_aggregation_pool.read().iter().cloned()); + let expected_committee_index_filtered = expected + .clone() + .into_iter() + .filter(|att| att.get_committee_indices_map().contains(&0)) + .collect::>(); + assert_eq!( + result_committee_index_filtered, + expected_committee_index_filtered + ); + } } pub async fn test_post_beacon_pool_attester_slashings_valid_v1(mut self) -> Self { @@ -6386,6 +6485,8 @@ async fn beacon_get_state_info_electra() { .test_beacon_states_pending_deposits() .await .test_beacon_states_pending_partial_withdrawals() + .await + .test_beacon_states_pending_consolidations() .await; } @@ -6416,10 +6517,30 @@ async fn beacon_get_blocks() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_get_pools() { +async fn test_beacon_pool_attestations_electra() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_beacon_pool_attestations() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_beacon_pool_attestations_base() { ApiTester::new() .await .test_get_beacon_pool_attestations() + .await; +} + +#[tokio::test(flavor = "multi_thread", 
worker_threads = 2)] +async fn beacon_get_pools() { + ApiTester::new() .await .test_get_beacon_pool_attester_slashings() .await diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 5a6628439e..89d260569a 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -14,7 +14,7 @@ use std::num::NonZeroU16; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; -use types::{ForkContext, ForkName}; +use types::ForkContext; pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED; pub const DEFAULT_TCP_PORT: u16 = 9000u16; @@ -22,18 +22,9 @@ pub const DEFAULT_DISC_PORT: u16 = 9000u16; pub const DEFAULT_QUIC_PORT: u16 = 9001u16; pub const DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD: usize = 1000usize; -/// The maximum size of gossip messages. -pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize { - if is_merge_enabled { - gossip_max_size - } else { - gossip_max_size / 10 - } -} - pub struct GossipsubConfigParams { pub message_domain_valid_snappy: [u8; 4], - pub gossip_max_size: usize, + pub gossipsub_max_transmit_size: usize, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -480,7 +471,6 @@ pub fn gossipsub_config( } } let message_domain_valid_snappy = gossipsub_config_params.message_domain_valid_snappy; - let is_bellatrix_enabled = fork_context.fork_exists(ForkName::Bellatrix); let gossip_message_id = move |message: &gossipsub::Message| { gossipsub::MessageId::from( &Sha256::digest( @@ -499,10 +489,7 @@ pub fn gossipsub_config( let duplicate_cache_time = Duration::from_secs(slots_per_epoch * seconds_per_slot * 2); gossipsub::ConfigBuilder::default() - .max_transmit_size(gossip_max_size( - is_bellatrix_enabled, - gossipsub_config_params.gossip_max_size, - )) + .max_transmit_size(gossipsub_config_params.gossipsub_max_transmit_size) .heartbeat_interval(load.heartbeat_interval) .mesh_n(load.mesh_n) 
.mesh_n_low(load.mesh_n_low) diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 2f8fd82c51..40fdd71b38 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -12,7 +12,6 @@ pub mod peer_manager; pub mod rpc; pub mod types; -pub use config::gossip_max_size; use libp2p::swarm::DialError; pub use listen_addr::*; @@ -122,6 +121,6 @@ pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; // pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; -pub use service::api_types::{PeerRequestId, Response}; +pub use service::api_types::Response; pub use service::utils::*; pub use service::{Gossipsub, NetworkEvent}; diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4b48c7e625..c3a44d941a 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -991,23 +991,23 @@ impl PeerManager { /// - Do not prune outbound peers to exceed our outbound target. /// - Do not prune more peers than our target peer count. /// - If we have an option to remove a number of peers, remove ones that have the least - /// long-lived subnets. + /// long-lived subnets. /// - When pruning peers based on subnet count. If multiple peers can be chosen, choose a peer - /// that is not subscribed to a long-lived sync committee subnet. + /// that is not subscribed to a long-lived sync committee subnet. /// - When pruning peers based on subnet count, do not prune a peer that would lower us below the - /// MIN_SYNC_COMMITTEE_PEERS peer count. To keep it simple, we favour a minimum number of sync-committee-peers over - /// uniformity subnet peers. 
NOTE: We could apply more sophisticated logic, but the code is - /// simpler and easier to maintain if we take this approach. If we are pruning subnet peers - /// below the MIN_SYNC_COMMITTEE_PEERS and maintaining the sync committee peers, this should be - /// fine as subnet peers are more likely to be found than sync-committee-peers. Also, we're - /// in a bit of trouble anyway if we have so few peers on subnets. The - /// MIN_SYNC_COMMITTEE_PEERS - /// number should be set low as an absolute lower bound to maintain peers on the sync - /// committees. + /// MIN_SYNC_COMMITTEE_PEERS peer count. To keep it simple, we favour a minimum number of sync-committee-peers over + /// uniformity subnet peers. NOTE: We could apply more sophisticated logic, but the code is + /// simpler and easier to maintain if we take this approach. If we are pruning subnet peers + /// below the MIN_SYNC_COMMITTEE_PEERS and maintaining the sync committee peers, this should be + /// fine as subnet peers are more likely to be found than sync-committee-peers. Also, we're + /// in a bit of trouble anyway if we have so few peers on subnets. The + /// MIN_SYNC_COMMITTEE_PEERS + /// number should be set low as an absolute lower bound to maintain peers on the sync + /// committees. /// - Do not prune trusted peers. NOTE: This means if a user has more trusted peers than the - /// excess peer limit, all of the following logic is subverted as we will not prune any peers. - /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage - /// its peers across the subnets. + /// excess peer limit, all of the following logic is subverted as we will not prune any peers. + /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage + /// its peers across the subnets. /// /// Prune peers in the following order: /// 1. 
Remove worst scoring peers diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 54e74457b8..083887046a 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -153,7 +153,7 @@ impl PeerDB { matches!( self.connection_status(peer_id), Some(PeerConnectionStatus::Disconnected { .. }) - | Some(PeerConnectionStatus::Unknown { .. }) + | Some(PeerConnectionStatus::Unknown) | None ) && !self.score_state_banned_or_disconnected(peer_id) } @@ -771,8 +771,8 @@ impl PeerDB { NewConnectionState::Connected { .. } // We have established a new connection (peer may not have been seen before) | NewConnectionState::Disconnecting { .. }// We are disconnecting from a peer that may not have been registered before | NewConnectionState::Dialing { .. } // We are dialing a potentially new peer - | NewConnectionState::Disconnected { .. } // Dialing a peer that responds by a different ID can be immediately - // disconnected without having being stored in the db before + | NewConnectionState::Disconnected // Dialing a peer that responds by a different ID can be immediately + // disconnected without having being stored in the db before ) { warn!(%peer_id, ?new_state, "Updating state of unknown peer"); } diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 3adc04eb6a..2612172e61 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -1002,40 +1002,34 @@ mod tests { } /// Bellatrix block with length < max_rpc_size. 
- fn bellatrix_block_small( - fork_context: &ForkContext, - spec: &ChainSpec, - ) -> SignedBeaconBlock { + fn bellatrix_block_small(spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Bellatrix(block); - assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); + assert!(block.ssz_bytes_len() <= spec.max_payload_size as usize); SignedBeaconBlock::from_block(block, Signature::empty()) } /// Bellatrix block with length > MAX_RPC_SIZE. /// The max limit for a Bellatrix block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a Bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. 
- fn bellatrix_block_large( - fork_context: &ForkContext, - spec: &ChainSpec, - ) -> SignedBeaconBlock { + fn bellatrix_block_large(spec: &ChainSpec) -> SignedBeaconBlock { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(&Spec::default_spec()); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Bellatrix(block); - assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); + assert!(block.ssz_bytes_len() > spec.max_payload_size as usize); SignedBeaconBlock::from_block(block, Signature::empty()) } @@ -1143,7 +1137,7 @@ mod tests { ) -> Result { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); + let max_packet_size = spec.max_payload_size as usize; let mut buf = BytesMut::new(); let mut snappy_inbound_codec = @@ -1190,7 +1184,7 @@ mod tests { ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); + let max_packet_size = spec.max_payload_size as usize; let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); // decode message just as snappy message @@ -1211,7 +1205,7 @@ mod tests { /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { let fork_context = Arc::new(fork_context(fork_name)); - let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize); + let max_packet_size = spec.max_payload_size as usize; let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); // Encode a request we send let mut buf = BytesMut::new(); @@ -1588,10 +1582,8 @@ mod tests { )))) ); - let bellatrix_block_small = - bellatrix_block_small(&fork_context(ForkName::Bellatrix), &chain_spec); - let bellatrix_block_large = - bellatrix_block_large(&fork_context(ForkName::Bellatrix), &chain_spec); + let bellatrix_block_small = bellatrix_block_small(&chain_spec); + let bellatrix_block_large = bellatrix_block_large(&chain_spec); assert_eq!( encode_then_decode_response( @@ -2091,7 +2083,7 @@ mod tests { // Insert length-prefix uvi_codec - .encode(chain_spec.max_chunk_size as usize + 1, &mut dst) + .encode(chain_spec.max_payload_size as usize + 1, &mut dst) .unwrap(); // Insert snappy stream identifier @@ -2129,7 +2121,7 @@ mod tests { let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, - max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), + chain_spec.max_payload_size as usize, fork_context, ); @@ -2165,7 +2157,7 @@ mod tests { let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, - max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize), + chain_spec.max_payload_size as usize, fork_context, ); @@ -2194,7 +2186,7 @@ mod tests { let chain_spec = Spec::default_spec(); - let max_rpc_size = max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize); + let max_rpc_size = chain_spec.max_payload_size as usize; let limit = protocol_id.rpc_response_limits::(&fork_context); let mut max = encode_len(limit.max + 1); let mut codec = SSZSnappyOutboundCodec::::new( diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs 
b/beacon_node/lighthouse_network/src/rpc/handler.rs index 8353b661c5..b86e2b3a6f 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -4,8 +4,7 @@ use super::methods::{GoodbyeReason, RpcErrorResponse, RpcResponse}; use super::outbound::OutboundRequestContainer; use super::protocol::{InboundOutput, Protocol, RPCError, RPCProtocol, RequestType}; -use super::RequestId; -use super::{RPCReceived, RPCSend, ReqId, Request}; +use super::{RPCReceived, RPCSend, ReqId}; use crate::rpc::outbound::OutboundFramed; use crate::rpc::protocol::InboundFramed; use fnv::FnvHashMap; @@ -91,6 +90,11 @@ pub struct RPCHandler where E: EthSpec, { + /// The PeerId matching this `ConnectionHandler`. + peer_id: PeerId, + + /// The ConnectionId matching this `ConnectionHandler`. + connection_id: ConnectionId, /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, ()>, @@ -139,9 +143,6 @@ where /// Timeout that will me used for inbound and outbound responses. resp_timeout: Duration, - - /// Information about this handler for logging purposes. 
- log_info: (PeerId, ConnectionId), } enum HandlerState { @@ -228,6 +229,8 @@ where connection_id: ConnectionId, ) -> Self { RPCHandler { + connection_id, + peer_id, listen_protocol, events_out: SmallVec::new(), dial_queue: SmallVec::new(), @@ -244,7 +247,6 @@ where fork_context, waker: None, resp_timeout, - log_info: (peer_id, connection_id), } } @@ -255,8 +257,8 @@ where if !self.dial_queue.is_empty() { debug!( unsent_queued_requests = self.dial_queue.len(), - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Starting handler shutdown" ); } @@ -306,8 +308,8 @@ where if !matches!(response, RpcResponse::StreamTermination(..)) { // the stream is closed after sending the expected number of responses trace!(%response, id = ?inbound_id, - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Inbound stream has expired. Response not sent"); } return; @@ -324,8 +326,8 @@ where if matches!(self.state, HandlerState::Deactivated) { // we no longer send responses after the handler is deactivated debug!(%response, id = ?inbound_id, - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Response not sent. 
Deactivated handler"); return; } @@ -394,8 +396,8 @@ where Poll::Ready(_) => { self.state = HandlerState::Deactivated; debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Shutdown timeout elapsed, Handler deactivated" ); return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( @@ -445,8 +447,8 @@ where ))); } else { crit!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, stream_id = ?outbound_id.get_ref(), "timed out substream not in the books"); } } @@ -577,8 +579,8 @@ where // Its useful to log when the request was completed. if matches!(info.protocol, Protocol::BlocksByRange) { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, duration = Instant::now() .duration_since(info.request_start_time) .as_secs(), @@ -587,8 +589,8 @@ where } if matches!(info.protocol, Protocol::BlobsByRange) { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, duration = Instant::now() .duration_since(info.request_start_time) .as_secs(), @@ -617,16 +619,16 @@ where if matches!(info.protocol, Protocol::BlocksByRange) { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, duration = info.request_start_time.elapsed().as_secs(), "BlocksByRange Response failed" ); } if matches!(info.protocol, Protocol::BlobsByRange) { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, duration = info.request_start_time.elapsed().as_secs(), "BlobsByRange Response failed" ); @@ -816,8 +818,8 @@ where } OutboundSubstreamState::Poisoned => { crit!( - peer_id = %self.log_info.0, - connection_id 
= %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Poisoned outbound substream" ); unreachable!("Coding Error: Outbound substream is poisoned") @@ -852,8 +854,8 @@ where && self.dial_negotiated == 0 { debug!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, + peer_id = %self.peer_id, + connection_id = %self.connection_id, "Goodbye sent, Handler deactivated" ); self.state = HandlerState::Deactivated; @@ -986,12 +988,13 @@ where self.shutdown(None); } - self.events_out - .push(HandlerEvent::Ok(RPCReceived::Request(Request { - id: RequestId::next(), + self.events_out.push(HandlerEvent::Ok(RPCReceived::Request( + super::InboundRequestId { + connection_id: self.connection_id, substream_id: self.current_inbound_substream_id, - r#type: req, - }))); + }, + req, + ))); self.current_inbound_substream_id.0 += 1; } @@ -1049,9 +1052,8 @@ where .is_some() { crit!( - peer_id = %self.log_info.0, - connection_id = %self.log_info.1, - + peer_id = %self.peer_id, + connection_id = %self.connection_id, id = ?self.current_outbound_substream_id, "Duplicate outbound substream id"); } self.current_outbound_substream_id.0 += 1; diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index f5085e798c..0f23da7f38 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -16,7 +16,6 @@ use libp2p::PeerId; use logging::crit; use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr}; use std::marker::PhantomData; -use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; @@ -34,7 +33,7 @@ pub use methods::{ BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest, ResponseTermination, RpcErrorResponse, StatusMessage, }; -pub use protocol::{max_rpc_size, Protocol, RPCError}; +pub use protocol::{Protocol, RPCError}; use 
self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; use self::protocol::RPCProtocol; @@ -49,8 +48,6 @@ mod protocol; mod rate_limiter; mod self_limiter; -static NEXT_REQUEST_ID: AtomicUsize = AtomicUsize::new(1); - /// Composite trait for a request id. pub trait ReqId: Send + 'static + std::fmt::Debug + Copy + Clone {} impl ReqId for T where T: Send + 'static + std::fmt::Debug + Copy + Clone {} @@ -80,7 +77,7 @@ pub enum RPCReceived { /// /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the /// *inbound* substream over which it is managed. - Request(Request), + Request(InboundRequestId, RequestType), /// A response received from the outside. /// /// The `Id` corresponds to the application given ID of the original request sent to the @@ -91,35 +88,30 @@ pub enum RPCReceived { EndOfStream(Id, ResponseTermination), } -/// Rpc `Request` identifier. -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct RequestId(usize); +// An identifier for the inbound requests received via Rpc. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct InboundRequestId { + /// The connection ID of the peer that sent the request. + connection_id: ConnectionId, + /// The ID of the substream that sent the request. + substream_id: SubstreamId, +} -impl RequestId { - /// Returns the next available [`RequestId`]. - pub fn next() -> Self { - Self(NEXT_REQUEST_ID.fetch_add(1, Ordering::SeqCst)) - } - - /// Creates an _unchecked_ [`RequestId`]. +impl InboundRequestId { + /// Creates an _unchecked_ [`InboundRequestId`]. /// - /// [`Rpc`] enforces that [`RequestId`]s are unique and not reused. + /// [`Rpc`] enforces that [`InboundRequestId`]s are unique and not reused. /// This constructor does not, hence the _unchecked_. /// /// It is primarily meant for allowing manual tests. 
- pub fn new_unchecked(id: usize) -> Self { - Self(id) + pub fn new_unchecked(connection_id: usize, substream_id: usize) -> Self { + Self { + connection_id: ConnectionId::new_unchecked(connection_id), + substream_id: SubstreamId::new(substream_id), + } } } -/// An Rpc Request. -#[derive(Debug, Clone)] -pub struct Request { - pub id: RequestId, - pub substream_id: SubstreamId, - pub r#type: RequestType, -} - impl std::fmt::Display for RPCSend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -136,7 +128,7 @@ pub struct RPCMessage { /// The peer that sent the message. pub peer_id: PeerId, /// Handler managing this message. - pub conn_id: ConnectionId, + pub connection_id: ConnectionId, /// The message that was sent. pub message: Result, HandlerErr>, } @@ -144,7 +136,7 @@ pub struct RPCMessage { type BehaviourAction = ToSwarm, RPCSend>; pub struct NetworkParams { - pub max_chunk_size: usize, + pub max_payload_size: usize, pub ttfb_timeout: Duration, pub resp_timeout: Duration, } @@ -215,14 +207,13 @@ impl RPC { pub fn send_response( &mut self, peer_id: PeerId, - id: (ConnectionId, SubstreamId), - _request_id: RequestId, - event: RpcResponse, + request_id: InboundRequestId, + response: RpcResponse, ) { self.events.push(ToSwarm::NotifyHandler { peer_id, - handler: NotifyHandler::One(id.0), - event: RPCSend::Response(id.1, event), + handler: NotifyHandler::One(request_id.connection_id), + event: RPCSend::Response(request_id.substream_id, response), }); } @@ -315,7 +306,7 @@ where let protocol = SubstreamProtocol::new( RPCProtocol { fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size), + max_rpc_size: self.fork_context.spec.max_payload_size as usize, enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, ttfb_timeout: self.network_params.ttfb_timeout, @@ -345,7 +336,7 @@ where let protocol = SubstreamProtocol::new( RPCProtocol { 
fork_context: self.fork_context.clone(), - max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size), + max_rpc_size: self.fork_context.spec.max_payload_size as usize, enable_light_client_server: self.enable_light_client_server, phantom: PhantomData, ttfb_timeout: self.network_params.ttfb_timeout, @@ -387,7 +378,7 @@ where for (id, proto) in limiter.peer_disconnected(peer_id) { let error_msg = ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id: connection_id, + connection_id, message: Err(HandlerErr::Outbound { id, proto, @@ -408,7 +399,7 @@ where } if *p == peer_id => { *event = ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id: connection_id, + connection_id, message: Err(HandlerErr::Outbound { id: *request_id, proto: req.versioned_protocol().protocol(), @@ -424,21 +415,17 @@ where fn on_connection_handler_event( &mut self, peer_id: PeerId, - conn_id: ConnectionId, + connection_id: ConnectionId, event: ::ToBehaviour, ) { match event { - HandlerEvent::Ok(RPCReceived::Request(Request { - id, - substream_id, - r#type, - })) => { + HandlerEvent::Ok(RPCReceived::Request(request_id, request_type)) => { if let Some(limiter) = self.limiter.as_mut() { // check if the request is conformant to the quota - match limiter.allows(&peer_id, &r#type) { + match limiter.allows(&peer_id, &request_type) { Err(RateLimitedErr::TooLarge) => { // we set the batch sizes, so this is a coding/config err for most protocols - let protocol = r#type.versioned_protocol().protocol(); + let protocol = request_type.versioned_protocol().protocol(); if matches!( protocol, Protocol::BlocksByRange @@ -448,7 +435,7 @@ where | Protocol::BlobsByRoot | Protocol::DataColumnsByRoot ) { - debug!(request = %r#type, %protocol, "Request too large to process"); + debug!(request = %request_type, %protocol, "Request too large to process"); } else { // Other protocols shouldn't be sending large messages, we should flag the peer kind crit!(%protocol, "Request size too large to ever 
be processed"); @@ -457,8 +444,7 @@ where // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, substream_id), - id, + request_id, RpcResponse::Error( RpcErrorResponse::RateLimited, "Rate limited. Request too large".into(), @@ -467,13 +453,12 @@ where return; } Err(RateLimitedErr::TooSoon(wait_time)) => { - debug!(request = %r#type, %peer_id, wait_time_ms = wait_time.as_millis(), "Request exceeds the rate limit"); + debug!(request = %request_type, %peer_id, wait_time_ms = wait_time.as_millis(), "Request exceeds the rate limit"); // send an error code to the peer. // the handler upon receiving the error code will send it back to the behaviour self.send_response( peer_id, - (conn_id, substream_id), - id, + request_id, RpcResponse::Error( RpcErrorResponse::RateLimited, format!("Wait {:?}", wait_time).into(), @@ -487,12 +472,11 @@ where } // If we received a Ping, we queue a Pong response. - if let RequestType::Ping(_) = r#type { - trace!(connection_id = %conn_id, %peer_id, "Received Ping, queueing Pong"); + if let RequestType::Ping(_) = request_type { + trace!(connection_id = %connection_id, %peer_id, "Received Ping, queueing Pong"); self.send_response( peer_id, - (conn_id, substream_id), - id, + request_id, RpcResponse::Success(RpcSuccessResponse::Pong(Ping { data: self.seq_number, })), @@ -501,25 +485,21 @@ where self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id, - message: Ok(RPCReceived::Request(Request { - id, - substream_id, - r#type, - })), + connection_id, + message: Ok(RPCReceived::Request(request_id, request_type)), })); } HandlerEvent::Ok(rpc) => { self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id, + connection_id, message: Ok(rpc), })); } HandlerEvent::Err(err) => { self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, - conn_id, + connection_id, message: Err(err), })); } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs 
b/beacon_node/lighthouse_network/src/rpc/protocol.rs index eac7d67490..8fc1e9a5f4 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -57,7 +57,7 @@ pub static SIGNED_BEACON_BLOCK_ALTAIR_MAX: LazyLock = LazyLock::new(|| { /// The `BeaconBlockBellatrix` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network -/// with `max_chunk_size`. +/// with `max_payload_size`. pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock = LazyLock::new(|| // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX @@ -122,15 +122,6 @@ const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// established before the stream is terminated. const REQUEST_TIMEOUT: u64 = 15; -/// Returns the maximum bytes that can be sent across the RPC. -pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize { - if fork_context.current_fork().bellatrix_enabled() { - max_chunk_size - } else { - max_chunk_size / 10 - } -} - /// Returns the rpc limits for beacon_block_by_range and beacon_block_by_root responses. 
/// /// Note: This function should take care to return the min/max limits accounting for all diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index af6ac37d2c..e4af977a6c 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -207,7 +207,7 @@ mod tests { use crate::rpc::rate_limiter::Quota; use crate::rpc::self_limiter::SelfRateLimiter; use crate::rpc::{Ping, Protocol, RequestType}; - use crate::service::api_types::{AppRequestId, RequestId, SingleLookupReqId, SyncRequestId}; + use crate::service::api_types::{AppRequestId, SingleLookupReqId, SyncRequestId}; use libp2p::PeerId; use logging::create_test_tracing_subscriber; use std::time::Duration; @@ -226,7 +226,7 @@ mod tests { Hash256::ZERO, &MainnetEthSpec::default_spec(), )); - let mut limiter: SelfRateLimiter = + let mut limiter: SelfRateLimiter = SelfRateLimiter::new(config, fork_context).unwrap(); let peer_id = PeerId::random(); let lookup_id = 0; @@ -234,12 +234,12 @@ mod tests { for i in 1..=5u32 { let _ = limiter.allows( peer_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + AppRequestId::Sync(SyncRequestId::SingleBlock { id: SingleLookupReqId { lookup_id, req_id: i, }, - })), + }), RequestType::Ping(Ping { data: i as u64 }), ); } @@ -256,9 +256,9 @@ mod tests { for i in 2..=5u32 { assert!(matches!( iter.next().unwrap().request_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + AppRequestId::Sync(SyncRequestId::SingleBlock { id: SingleLookupReqId { req_id, .. }, - })) if req_id == i, + }) if req_id == i, )); } @@ -281,9 +281,9 @@ mod tests { for i in 3..=5 { assert!(matches!( iter.next().unwrap().request_id, - RequestId::Application(AppRequestId::Sync(SyncRequestId::SingleBlock { + AppRequestId::Sync(SyncRequestId::SingleBlock { id: SingleLookupReqId { req_id, .. 
}, - })) if req_id == i, + }) if req_id == i, )); } diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 894fff5074..b36f8cc215 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -1,8 +1,4 @@ -use crate::rpc::{ - methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}, - SubstreamId, -}; -use libp2p::swarm::ConnectionId; +use crate::rpc::methods::{ResponseTermination, RpcResponse, RpcSuccessResponse, StatusMessage}; use std::fmt::{Display, Formatter}; use std::sync::Arc; use types::{ @@ -10,9 +6,6 @@ use types::{ LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, }; -/// Identifier of requests sent by a peer. -pub type PeerRequestId = (ConnectionId, SubstreamId); - pub type Id = u32; #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] @@ -130,12 +123,6 @@ pub struct CustodyRequester(pub SingleLookupReqId); pub enum AppRequestId { Sync(SyncRequestId), Router, -} - -/// Global identifier of a request. 
-#[derive(Debug, Clone, Copy)] -pub enum RequestId { - Application(AppRequestId), Internal, } diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 9650976c63..86da517e21 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -10,8 +10,9 @@ use crate::peer_manager::{ use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; use crate::rpc::methods::MetadataRequest; use crate::rpc::{ - self, GoodbyeReason, HandlerErr, NetworkParams, Protocol, RPCError, RPCMessage, RPCReceived, - RequestType, ResponseTermination, RpcErrorResponse, RpcResponse, RpcSuccessResponse, RPC, + GoodbyeReason, HandlerErr, InboundRequestId, NetworkParams, Protocol, RPCError, RPCMessage, + RPCReceived, RequestType, ResponseTermination, RpcErrorResponse, RpcResponse, + RpcSuccessResponse, RPC, }; use crate::types::{ all_topics_at_fork, core_topics_to_subscribe, is_fork_non_core_topic, subnet_from_topic_hash, @@ -20,7 +21,7 @@ use crate::types::{ use crate::EnrExt; use crate::Eth2Enr; use crate::{metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; -use api_types::{AppRequestId, PeerRequestId, RequestId, Response}; +use api_types::{AppRequestId, Response}; use futures::stream::StreamExt; use gossipsub::{ IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, @@ -66,7 +67,7 @@ pub enum NetworkEvent { /// An RPC Request that was sent failed. RPCFailed { /// The id of the failed request. - id: AppRequestId, + app_request_id: AppRequestId, /// The peer to which this request was sent. peer_id: PeerId, /// The error of the failed request. @@ -76,15 +77,15 @@ pub enum NetworkEvent { /// The peer that sent the request. peer_id: PeerId, /// Identifier of the request. All responses to this request must use this id. - id: PeerRequestId, + inbound_request_id: InboundRequestId, /// Request the peer sent. 
- request: rpc::Request, + request_type: RequestType, }, ResponseReceived { /// Peer that sent the response. peer_id: PeerId, /// Id of the request to which the peer is responding. - id: AppRequestId, + app_request_id: AppRequestId, /// Response the peer sent. response: Response, }, @@ -126,7 +127,7 @@ where /// The peer manager that keeps track of peer's reputation and status. pub peer_manager: PeerManager, /// The Eth2 RPC specified in the wire-0 protocol. - pub eth2_rpc: RPC, + pub eth2_rpc: RPC, /// Discv5 Discovery protocol. pub discovery: Discovery, /// Keep regular connection to peers and disconnect if absent. @@ -222,7 +223,7 @@ impl Network { let gossipsub_config_params = GossipsubConfigParams { message_domain_valid_snappy: ctx.chain_spec.message_domain_valid_snappy, - gossip_max_size: ctx.chain_spec.gossip_max_size as usize, + gossipsub_max_transmit_size: ctx.chain_spec.max_message_size(), }; let gs_config = gossipsub_config( config.network_load, @@ -333,7 +334,9 @@ impl Network { ) }); - let snappy_transform = SnappyTransform::new(gs_config.max_transmit_size()); + let spec = &ctx.chain_spec; + let snappy_transform = + SnappyTransform::new(spec.max_payload_size as usize, spec.max_compressed_len()); let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( MessageAuthenticity::Anonymous, gs_config.clone(), @@ -364,7 +367,7 @@ impl Network { }; let network_params = NetworkParams { - max_chunk_size: ctx.chain_spec.max_chunk_size as usize, + max_payload_size: ctx.chain_spec.max_payload_size as usize, ttfb_timeout: ctx.chain_spec.ttfb_timeout(), resp_timeout: ctx.chain_spec.resp_timeout(), }; @@ -669,7 +672,7 @@ impl Network { name = "libp2p", skip_all )] - pub fn eth2_rpc_mut(&mut self) -> &mut RPC { + pub fn eth2_rpc_mut(&mut self) -> &mut RPC { &mut self.swarm.behaviour_mut().eth2_rpc } /// Discv5 Discovery protocol. 
@@ -720,7 +723,7 @@ impl Network { name = "libp2p", skip_all )] - pub fn eth2_rpc(&self) -> &RPC { + pub fn eth2_rpc(&self) -> &RPC { &self.swarm.behaviour().eth2_rpc } /// Discv5 Discovery protocol. @@ -1104,16 +1107,16 @@ impl Network { pub fn send_request( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, request: RequestType, ) -> Result<(), (AppRequestId, RPCError)> { // Check if the peer is connected before sending an RPC request if !self.swarm.is_connected(&peer_id) { - return Err((request_id, RPCError::Disconnected)); + return Err((app_request_id, RPCError::Disconnected)); } self.eth2_rpc_mut() - .send_request(peer_id, RequestId::Application(request_id), request); + .send_request(peer_id, app_request_id, request); Ok(()) } @@ -1127,12 +1130,11 @@ impl Network { pub fn send_response( &mut self, peer_id: PeerId, - id: PeerRequestId, - request_id: rpc::RequestId, + inbound_request_id: InboundRequestId, response: Response, ) { self.eth2_rpc_mut() - .send_response(peer_id, id, request_id, response.into()) + .send_response(peer_id, inbound_request_id, response.into()) } /// Inform the peer that their request produced an error. @@ -1145,15 +1147,13 @@ impl Network { pub fn send_error_response( &mut self, peer_id: PeerId, - id: PeerRequestId, - request_id: rpc::RequestId, + inbound_request_id: InboundRequestId, error: RpcErrorResponse, reason: String, ) { self.eth2_rpc_mut().send_response( peer_id, - id, - request_id, + inbound_request_id, RpcResponse::Error(error, reason.into()), ) } @@ -1374,7 +1374,7 @@ impl Network { skip_all )] fn ping(&mut self, peer_id: PeerId) { - self.eth2_rpc_mut().ping(peer_id, RequestId::Internal); + self.eth2_rpc_mut().ping(peer_id, AppRequestId::Internal); } /// Sends a METADATA request to a peer. 
@@ -1394,7 +1394,7 @@ impl Network { RequestType::MetaData(MetadataRequest::new_v2()) }; self.eth2_rpc_mut() - .send_request(peer_id, RequestId::Internal, event); + .send_request(peer_id, AppRequestId::Internal, event); } /// Sends a METADATA response to a peer. @@ -1407,15 +1407,14 @@ impl Network { fn send_meta_data_response( &mut self, _req: MetadataRequest, - id: PeerRequestId, - request_id: rpc::RequestId, + inbound_request_id: InboundRequestId, peer_id: PeerId, ) { let metadata = self.network_globals.local_metadata.read().clone(); // The encoder is responsible for sending the negotiated version of the metadata let event = RpcResponse::Success(RpcSuccessResponse::MetaData(Arc::new(metadata))); self.eth2_rpc_mut() - .send_response(peer_id, id, request_id, event); + .send_response(peer_id, inbound_request_id, event); } // RPC Propagation methods @@ -1429,17 +1428,17 @@ impl Network { )] fn build_response( &mut self, - id: RequestId, + app_request_id: AppRequestId, peer_id: PeerId, response: Response, ) -> Option> { - match id { - RequestId::Application(id) => Some(NetworkEvent::ResponseReceived { + match app_request_id { + AppRequestId::Internal => None, + _ => Some(NetworkEvent::ResponseReceived { peer_id, - id, + app_request_id, response, }), - RequestId::Internal => None, } } @@ -1643,7 +1642,7 @@ impl Network { name = "libp2p", skip_all )] - fn inject_rpc_event(&mut self, event: RPCMessage) -> Option> { + fn inject_rpc_event(&mut self, event: RPCMessage) -> Option> { let peer_id = event.peer_id; // Do not permit Inbound events from peers that are being disconnected or RPC requests, @@ -1656,7 +1655,6 @@ impl Network { return None; } - let connection_id = event.conn_id; // The METADATA and PING RPC responses are handled within the behaviour and not propagated match event.message { Err(handler_err) => { @@ -1686,16 +1684,20 @@ impl Network { ConnectionDirection::Outgoing, ); // inform failures of requests coming outside the behaviour - if let 
RequestId::Application(id) = id { - Some(NetworkEvent::RPCFailed { peer_id, id, error }) - } else { + if let AppRequestId::Internal = id { None + } else { + Some(NetworkEvent::RPCFailed { + peer_id, + app_request_id: id, + error, + }) } } } } - Ok(RPCReceived::Request(request)) => { - match request.r#type { + Ok(RPCReceived::Request(inbound_request_id, request_type)) => { + match request_type { /* Behaviour managed protocols: Ping and Metadata */ RequestType::Ping(ping) => { // inform the peer manager and send the response @@ -1704,12 +1706,7 @@ impl Network { } RequestType::MetaData(req) => { // send the requested meta-data - self.send_meta_data_response( - req, - (connection_id, request.substream_id), - request.id, - peer_id, - ); + self.send_meta_data_response(req, inbound_request_id, peer_id); None } RequestType::Goodbye(reason) => { @@ -1734,8 +1731,8 @@ impl Network { // propagate the STATUS message upwards Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::BlocksByRange(ref req) => { @@ -1757,32 +1754,32 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::BlocksByRoot(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::BlobsByRange(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"]); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::BlobsByRoot(_) => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, 
request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::DataColumnsByRoot(_) => { @@ -1792,8 +1789,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::DataColumnsByRange(_) => { @@ -1803,8 +1800,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::LightClientBootstrap(_) => { @@ -1814,8 +1811,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::LightClientOptimisticUpdate => { @@ -1825,8 +1822,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::LightClientFinalityUpdate => { @@ -1836,8 +1833,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } RequestType::LightClientUpdatesByRange(_) => { @@ -1847,8 +1844,8 @@ impl Network { ); Some(NetworkEvent::RequestReceived { peer_id, - id: (connection_id, request.substream_id), - request, + inbound_request_id, + request_type, }) } } @@ -2010,7 +2007,7 @@ impl Network { debug!(%peer_id, %reason, "Peer Manager disconnecting peer"); // send one goodbye self.eth2_rpc_mut() - .shutdown(peer_id, RequestId::Internal, reason); + .shutdown(peer_id, AppRequestId::Internal, reason); None } } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index c199d2312b..880b387250 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -52,13 +52,16 @@ pub enum 
PubsubMessage { // Implements the `DataTransform` trait of gossipsub to employ snappy compression pub struct SnappyTransform { /// Sets the maximum size we allow gossipsub messages to decompress to. - max_size_per_message: usize, + max_uncompressed_len: usize, + /// Sets the maximum size we allow for compressed gossipsub message data. + max_compressed_len: usize, } impl SnappyTransform { - pub fn new(max_size_per_message: usize) -> Self { + pub fn new(max_uncompressed_len: usize, max_compressed_len: usize) -> Self { SnappyTransform { - max_size_per_message, + max_uncompressed_len, + max_compressed_len, } } } @@ -69,12 +72,19 @@ impl gossipsub::DataTransform for SnappyTransform { &self, raw_message: gossipsub::RawMessage, ) -> Result { - // check the length of the raw bytes - let len = decompress_len(&raw_message.data)?; - if len > self.max_size_per_message { + // first check the size of the compressed payload + if raw_message.data.len() > self.max_compressed_len { return Err(Error::new( ErrorKind::InvalidData, - "ssz_snappy decoded data > GOSSIP_MAX_SIZE", + "ssz_snappy encoded data > max_compressed_len", + )); + } + // check the length of the uncompressed bytes + let len = decompress_len(&raw_message.data)?; + if len > self.max_uncompressed_len { + return Err(Error::new( + ErrorKind::InvalidData, + "ssz_snappy decoded data > MAX_PAYLOAD_SIZE", )); } @@ -98,10 +108,10 @@ impl gossipsub::DataTransform for SnappyTransform { ) -> Result, std::io::Error> { // Currently we are not employing topic-based compression. Everything is expected to be // snappy compressed. 
- if data.len() > self.max_size_per_message { + if data.len() > self.max_uncompressed_len { return Err(Error::new( ErrorKind::InvalidData, - "ssz_snappy Encoded data > GOSSIP_MAX_SIZE", + "ssz_snappy Encoded data > MAX_PAYLOAD_SIZE", )); } let mut encoder = Encoder::new(); diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index d736fefa5f..7a0eb4602b 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -5,7 +5,7 @@ mod common; use common::{build_tracing_subscriber, Protocol}; use lighthouse_network::rpc::{methods::*, RequestType}; use lighthouse_network::service::api_types::AppRequestId; -use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Response}; +use lighthouse_network::{NetworkEvent, ReportSource, Response}; use ssz::Encode; use ssz_types::VariableList; use std::sync::Arc; @@ -15,37 +15,37 @@ use tokio::time::sleep; use tracing::{debug, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BlobSidecar, ChainSpec, - EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, MinimalEthSpec, + EmptyBlock, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MinimalEthSpec, RuntimeVariableList, Signature, SignedBeaconBlock, Slot, }; type E = MinimalEthSpec; /// Bellatrix block with length < max_rpc_size. 
-fn bellatrix_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { +fn bellatrix_block_small(spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Bellatrix(block); - assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize)); + assert!(block.ssz_bytes_len() <= spec.max_payload_size as usize); block } /// Bellatrix block with length > MAX_RPC_SIZE. /// The max limit for a bellatrix block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. -fn bellatrix_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock { +fn bellatrix_block_large(spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::>()); + let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); block.body.execution_payload.execution_payload.transactions = txs; let block = BeaconBlock::Bellatrix(block); - assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize)); + assert!(block.ssz_bytes_len() > spec.max_payload_size as usize); block } @@ -98,7 +98,7 @@ fn test_tcp_status_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: AppRequestId::Router, + app_request_id: AppRequestId::Router, response, } => { // Should receive the RPC response @@ -118,13 +118,17 @@ fn test_tcp_status_rpc() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + 
request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response debug!("Receiver Received"); - receiver.send_response(peer_id, id, request.id, rpc_response.clone()); + receiver.send_response( + peer_id, + inbound_request_id, + rpc_response.clone(), + ); } } _ => {} // Ignore other events @@ -184,7 +188,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block))); - let full_block = bellatrix_block_small(&common::fork_context(ForkName::Bellatrix), &spec); + let full_block = bellatrix_block_small(&spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_bellatrix_small = Response::BlocksByRange(Some(Arc::new(signed_full_block))); @@ -204,7 +208,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: _, + app_request_id: _, response, } => { warn!("Sender received a response"); @@ -240,10 +244,10 @@ fn test_tcp_blocks_by_range_chunked_rpc() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); for i in 0..messages_to_send { @@ -258,16 +262,14 @@ fn test_tcp_blocks_by_range_chunked_rpc() { }; receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, rpc_response.clone(), ); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlocksByRange(None), ); } @@ -338,7 +340,7 @@ fn test_blobs_by_range_chunked_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: _, + app_request_id: _, response, } => { warn!("Sender received a response"); @@ -368,10 +370,10 @@ fn 
test_blobs_by_range_chunked_rpc() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); for _ in 0..messages_to_send { @@ -379,16 +381,14 @@ fn test_blobs_by_range_chunked_rpc() { // second as altair and third as bellatrix. receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, rpc_response.clone(), ); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlobsByRange(None), ); } @@ -442,7 +442,7 @@ fn test_tcp_blocks_by_range_over_limit() { })); // BlocksByRange Response - let full_block = bellatrix_block_large(&common::fork_context(ForkName::Bellatrix), &spec); + let full_block = bellatrix_block_large(&spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_bellatrix_large = Response::BlocksByRange(Some(Arc::new(signed_full_block))); @@ -459,8 +459,8 @@ fn test_tcp_blocks_by_range_over_limit() { .unwrap(); } // The request will fail because the sender will refuse to send anything > MAX_RPC_SIZE - NetworkEvent::RPCFailed { id, .. } => { - assert!(matches!(id, AppRequestId::Router)); + NetworkEvent::RPCFailed { app_request_id, .. 
} => { + assert!(matches!(app_request_id, AppRequestId::Router)); return; } _ => {} // Ignore other behaviour events @@ -474,26 +474,24 @@ fn test_tcp_blocks_by_range_over_limit() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); for _ in 0..messages_to_send { let rpc_response = rpc_response_bellatrix_large.clone(); receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, rpc_response.clone(), ); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlocksByRange(None), ); } @@ -566,7 +564,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { } NetworkEvent::ResponseReceived { peer_id: _, - id: _, + app_request_id: _, response, } => // Should receive the RPC response @@ -608,15 +606,15 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { futures::future::Either::Left(( NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, }, _, )) => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); - message_info = Some((peer_id, id, request.id)); + message_info = Some((peer_id, inbound_request_id)); } } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -626,8 +624,8 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. 
This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); + let (peer_id, inbound_request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *inbound_request_id, rpc_response.clone()); debug!("Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -700,7 +698,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: AppRequestId::Router, + app_request_id: AppRequestId::Router, response, } => match response { Response::BlocksByRange(Some(_)) => { @@ -727,26 +725,24 @@ fn test_tcp_blocks_by_range_single_empty_rpc() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); for _ in 1..=messages_to_send { receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, rpc_response.clone(), ); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlocksByRange(None), ); } @@ -817,7 +813,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() { let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); - let full_block = bellatrix_block_small(&common::fork_context(ForkName::Bellatrix), &spec); + let full_block = bellatrix_block_small(&spec); let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty()); let rpc_response_bellatrix_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block))); @@ -837,7 +833,7 @@ fn 
test_tcp_blocks_by_root_chunked_rpc() { } NetworkEvent::ResponseReceived { peer_id: _, - id: AppRequestId::Router, + app_request_id: AppRequestId::Router, response, } => match response { Response::BlocksByRoot(Some(_)) => { @@ -870,10 +866,10 @@ fn test_tcp_blocks_by_root_chunked_rpc() { match receiver.next_event().await { NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response debug!("Receiver got request"); @@ -886,14 +882,13 @@ fn test_tcp_blocks_by_root_chunked_rpc() { } else { rpc_response_bellatrix_small.clone() }; - receiver.send_response(peer_id, id, request.id, rpc_response); + receiver.send_response(peer_id, inbound_request_id, rpc_response); debug!("Sending message"); } // send the stream termination receiver.send_response( peer_id, - id, - request.id, + inbound_request_id, Response::BlocksByRange(None), ); debug!("Send stream term"); @@ -977,7 +972,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { } NetworkEvent::ResponseReceived { peer_id: _, - id: AppRequestId::Router, + app_request_id: AppRequestId::Router, response, } => { debug!("Sender received a response"); @@ -1019,15 +1014,15 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { futures::future::Either::Left(( NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, }, _, )) => { - if request.r#type == rpc_request { + if request_type == rpc_request { // send the response warn!("Receiver got request"); - message_info = Some((peer_id, id, request.id)); + message_info = Some((peer_id, inbound_request_id)); } } futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required @@ -1037,8 +1032,8 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() { // if we need to send messages send them here. 
This will happen after a delay if message_info.is_some() { messages_sent += 1; - let (peer_id, stream_id, request_id) = message_info.as_ref().unwrap(); - receiver.send_response(*peer_id, *stream_id, *request_id, rpc_response.clone()); + let (peer_id, inbound_request_id) = message_info.as_ref().unwrap(); + receiver.send_response(*peer_id, *inbound_request_id, rpc_response.clone()); debug!("Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 7c38ae9d75..b129b54841 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -88,6 +88,15 @@ pub static BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE: LazyLock> = + LazyLock::new(|| { + try_create_histogram_vec_with_buckets( + "beacon_processor_get_block_roots_time_seconds", + "Time to complete get_block_roots when serving by_range requests", + decimal_buckets(-3, -1), + &["source"], + ) + }); /* * Gossip processor diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index f104bbf1bc..d61ea58377 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -821,11 +821,12 @@ impl NetworkBeaconProcessor { | GossipDataColumnError::ProposerIndexMismatch { .. } | GossipDataColumnError::IsNotLaterThanParent { .. } | GossipDataColumnError::InvalidSubnetId { .. } - | GossipDataColumnError::InvalidInclusionProof { .. } + | GossipDataColumnError::InvalidInclusionProof | GossipDataColumnError::InvalidKzgProof { .. } | GossipDataColumnError::UnexpectedDataColumn | GossipDataColumnError::InvalidColumnIndex(_) - | GossipDataColumnError::InconsistentCommitmentsOrProofLength + | GossipDataColumnError::InconsistentCommitmentsLength { .. 
} + | GossipDataColumnError::InconsistentProofsLength { .. } | GossipDataColumnError::NotFinalizedDescendant { .. } => { debug!( error = ?err, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 1329936932..9a8edbfa4c 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -15,12 +15,11 @@ use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, }; -use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, LightClientUpdatesByRangeRequest, }; -use lighthouse_network::rpc::{RequestId, SubstreamId}; +use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PubsubMessage, @@ -647,21 +646,13 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_range_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, // Use ResponseId here request: BlocksByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_range_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + .handle_blocks_by_range_request(peer_id, inbound_request_id, request) .await; }; @@ -675,21 +666,13 @@ impl NetworkBeaconProcessor { pub fn send_blocks_by_roots_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, // Use ResponseId here 
request: BlocksByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = async move { processor - .handle_blocks_by_root_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + .handle_blocks_by_root_request(peer_id, inbound_request_id, request) .await; }; @@ -703,21 +686,12 @@ impl NetworkBeaconProcessor { pub fn send_blobs_by_range_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlobsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_blobs_by_range_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) - }; + let process_fn = + move || processor.handle_blobs_by_range_request(peer_id, inbound_request_id, request); self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -729,21 +703,12 @@ impl NetworkBeaconProcessor { pub fn send_blobs_by_roots_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlobsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_blobs_by_root_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) - }; + let process_fn = + move || processor.handle_blobs_by_root_request(peer_id, inbound_request_id, request); self.try_send(BeaconWorkEvent { drop_during_sync: false, @@ -755,20 +720,12 @@ impl NetworkBeaconProcessor { pub fn send_data_columns_by_roots_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: DataColumnsByRootRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = move || { - 
processor.handle_data_columns_by_root_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + processor.handle_data_columns_by_root_request(peer_id, inbound_request_id, request) }; self.try_send(BeaconWorkEvent { @@ -781,20 +738,12 @@ impl NetworkBeaconProcessor { pub fn send_data_columns_by_range_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: DataColumnsByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = move || { - processor.handle_data_columns_by_range_request( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + processor.handle_data_columns_by_range_request(peer_id, inbound_request_id, request) }; self.try_send(BeaconWorkEvent { @@ -807,21 +756,12 @@ impl NetworkBeaconProcessor { pub fn send_light_client_bootstrap_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: LightClientBootstrapRequest, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_light_client_bootstrap( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) - }; + let process_fn = + move || processor.handle_light_client_bootstrap(peer_id, inbound_request_id, request); self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -833,19 +773,11 @@ impl NetworkBeaconProcessor { pub fn send_light_client_optimistic_update_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_light_client_optimistic_update( - peer_id, - connection_id, - substream_id, - request_id, - ) - }; + let process_fn = + move || 
processor.handle_light_client_optimistic_update(peer_id, inbound_request_id); self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -857,19 +789,11 @@ impl NetworkBeaconProcessor { pub fn send_light_client_finality_update_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, ) -> Result<(), Error> { let processor = self.clone(); - let process_fn = move || { - processor.handle_light_client_finality_update( - peer_id, - connection_id, - substream_id, - request_id, - ) - }; + let process_fn = + move || processor.handle_light_client_finality_update(peer_id, inbound_request_id); self.try_send(BeaconWorkEvent { drop_during_sync: true, @@ -881,20 +805,12 @@ impl NetworkBeaconProcessor { pub fn send_light_client_updates_by_range_request( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: LightClientUpdatesByRangeRequest, ) -> Result<(), Error> { let processor = self.clone(); let process_fn = move || { - processor.handle_light_client_updates_by_range( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + processor.handle_light_client_updates_by_range(peer_id, inbound_request_id, request) }; self.try_send(BeaconWorkEvent { @@ -927,14 +843,10 @@ impl NetworkBeaconProcessor { block_root: Hash256, publish_blobs: bool, ) { - let is_supernode = self.network_globals.is_supernode(); - + let custody_columns = self.network_globals.sampling_columns.clone(); let self_cloned = self.clone(); let publish_fn = move |blobs_or_data_column| { - // At the moment non supernodes are not required to publish any columns. - // TODO(das): we could experiment with having full nodes publish their custodied - // columns here. 
- if publish_blobs && is_supernode { + if publish_blobs { match blobs_or_data_column { BlobsOrDataColumns::Blobs(blobs) => { self_cloned.publish_blobs_gradually(blobs, block_root); @@ -950,6 +862,7 @@ impl NetworkBeaconProcessor { self.chain.clone(), block_root, block.clone(), + custody_columns, publish_fn, ) .instrument(tracing::info_span!( @@ -1139,7 +1052,7 @@ impl NetworkBeaconProcessor { /// /// This is an optimisation to reduce outbound bandwidth and ensures each column is published /// by some nodes on the network as soon as possible. Our hope is that some columns arrive from - /// other supernodes in the meantime, obviating the need for us to publish them. If no other + /// other nodes in the meantime, obviating the need for us to publish them. If no other /// publisher exists for a column, it will eventually get published here. fn publish_data_columns_gradually( self: &Arc, @@ -1164,9 +1077,9 @@ impl NetworkBeaconProcessor { }); }; - // If this node is a super node, permute the columns and split them into batches. + // Permute the columns and split them into batches. // The hope is that we won't need to publish some columns because we will receive them - // on gossip from other supernodes. + // on gossip from other nodes. 
data_columns_to_publish.shuffle(&mut rand::thread_rng()); let blob_publication_batch_interval = chain.config.blob_publication_batch_interval; diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 7beadffc06..bc97f88492 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -1,15 +1,15 @@ +use crate::metrics; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; -use itertools::process_results; -use lighthouse_network::discovery::ConnectionId; +use itertools::{process_results, Itertools}; use lighthouse_network::rpc::methods::{ BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, }; use lighthouse_network::rpc::*; -use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; +use lighthouse_network::{PeerId, ReportSource, Response, SyncInfo}; use methods::LightClientUpdatesByRangeRequest; use slot_clock::SlotClock; use std::collections::{hash_map::Entry, HashMap}; @@ -34,15 +34,12 @@ impl NetworkBeaconProcessor { pub fn send_response( &self, peer_id: PeerId, + inbound_request_id: InboundRequestId, response: Response, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, ) { self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, - id: (connection_id, substream_id), + inbound_request_id, response, }) } @@ -52,15 +49,13 @@ impl NetworkBeaconProcessor { peer_id: PeerId, error: RpcErrorResponse, reason: String, - id: PeerRequestId, - request_id: RequestId, + inbound_request_id: InboundRequestId, ) { self.send_network_message(NetworkMessage::SendErrorResponse { 
peer_id, error, reason, - id, - request_id, + inbound_request_id, }) } @@ -161,24 +156,14 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlocksByRootRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, self.clone() - .handle_blocks_by_root_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - request, - ) + .handle_blocks_by_root_request_inner(peer_id, inbound_request_id, request) .await, Response::BlocksByRoot, ); @@ -188,9 +173,7 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_root_request_inner( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlocksByRootRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { let log_results = |peer_id, requested_blocks, send_block_count| { @@ -220,10 +203,8 @@ impl NetworkBeaconProcessor { Ok(Some(block)) => { self.send_response( peer_id, + inbound_request_id, Response::BlocksByRoot(Some(block.clone())), - connection_id, - substream_id, - request_id, ); send_block_count += 1; } @@ -265,23 +246,13 @@ impl NetworkBeaconProcessor { pub fn handle_blobs_by_root_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlobsByRootRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, - self.handle_blobs_by_root_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - request, - ), + inbound_request_id, + self.handle_blobs_by_root_request_inner(peer_id, inbound_request_id, request), Response::BlobsByRoot, ); } @@ -290,9 +261,7 @@ impl 
NetworkBeaconProcessor { pub fn handle_blobs_by_root_request_inner( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: BlobsByRootRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { let Some(requested_root) = request.blob_ids.as_slice().first().map(|id| id.block_root) @@ -314,10 +283,8 @@ impl NetworkBeaconProcessor { if let Ok(Some(blob)) = self.chain.data_availability_checker.get_blob(id) { self.send_response( peer_id, + inbound_request_id, Response::BlobsByRoot(Some(blob)), - connection_id, - substream_id, - request_id, ); send_blob_count += 1; } else { @@ -339,10 +306,8 @@ impl NetworkBeaconProcessor { if blob_sidecar.index == *index { self.send_response( peer_id, + inbound_request_id, Response::BlobsByRoot(Some(blob_sidecar.clone())), - connection_id, - substream_id, - request_id, ); send_blob_count += 1; break 'inner; @@ -375,23 +340,13 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: DataColumnsByRootRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, - self.handle_data_columns_by_root_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - request, - ), + inbound_request_id, + self.handle_data_columns_by_root_request_inner(peer_id, inbound_request_id, request), Response::DataColumnsByRoot, ); } @@ -400,9 +355,7 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_root_request_inner( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: DataColumnsByRootRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { let mut send_data_column_count = 0; @@ -416,10 +369,8 @@ impl 
NetworkBeaconProcessor { send_data_column_count += 1; self.send_response( peer_id, + inbound_request_id, Response::DataColumnsByRoot(Some(data_column)), - connection_id, - substream_id, - request_id, ); } Ok(None) => {} // no-op @@ -449,22 +400,16 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_updates_by_range( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: LightClientUpdatesByRangeRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, self.clone() .handle_light_client_updates_by_range_request_inner( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, request, ), Response::LightClientUpdatesByRange, @@ -475,9 +420,7 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_updates_by_range_request_inner( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: LightClientUpdatesByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!( @@ -516,8 +459,7 @@ impl NetworkBeaconProcessor { self.send_network_message(NetworkMessage::SendResponse { peer_id, response: Response::LightClientUpdatesByRange(Some(Arc::new(lc_update.clone()))), - request_id, - id: (connection_id, substream_id), + inbound_request_id, }); } @@ -549,16 +491,12 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_bootstrap( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, request: LightClientBootstrapRequest, ) { self.terminate_response_single_item( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, match self.chain.get_light_client_bootstrap(&request.root) { Ok(Some((bootstrap, _))) => Ok(Arc::new(bootstrap)), Ok(None) => 
Err(( @@ -583,15 +521,11 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_optimistic_update( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, ) { self.terminate_response_single_item( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, match self .chain .light_client_server_cache @@ -611,15 +545,11 @@ impl NetworkBeaconProcessor { pub fn handle_light_client_finality_update( self: &Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, ) { self.terminate_response_single_item( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, match self .chain .light_client_server_cache @@ -639,24 +569,14 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: BlocksByRangeRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, + inbound_request_id, self.clone() - .handle_blocks_by_range_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - req, - ) + .handle_blocks_by_range_request_inner(peer_id, inbound_request_id, req) .await, Response::BlocksByRange, ); @@ -666,102 +586,60 @@ impl NetworkBeaconProcessor { pub async fn handle_blocks_by_range_request_inner( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: BlocksByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { + let req_start_slot = *req.start_slot(); + let req_count = *req.count(); + debug!( %peer_id, - count = req.count(), - start_slot = %req.start_slot(), + count = req_count, + start_slot = %req_start_slot, 
"Received BlocksByRange Request" ); - let forwards_block_root_iter = match self - .chain - .forwards_iter_block_roots(Slot::from(*req.start_slot())) - { - Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockOutOfRange { - slot, - oldest_block_slot, - }) => { - debug!( - requested_slot = %slot, - oldest_known_slot = %oldest_block_slot, - "Range request failed during backfill" - ); - return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); - } - Err(e) => { - error!( - request = ?req, - %peer_id, - error = ?e, - "Unable to obtain root iter" - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; - - // Pick out the required blocks, ignoring skip-slots. - let mut last_block_root = None; - let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| { - slot.as_u64() < req.start_slot().saturating_add(*req.count()) - }) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .collect::>>() - }); - - let block_roots = match maybe_block_roots { - Ok(block_roots) => block_roots, - Err(e) => { - error!( - request = ?req, - %peer_id, - error = ?e, - "Error during iteration over blocks" - ); - return Err((RpcErrorResponse::ServerError, "Iteration error")); - } - }; - - // remove all skip slots - let block_roots = block_roots.into_iter().flatten().collect::>(); + // Spawn a blocking handle since get_block_roots_for_slot_range takes a sync lock on the + // fork-choice. + let network_beacon_processor = self.clone(); + let block_roots = self + .executor + .spawn_blocking_handle( + move || { + network_beacon_processor.get_block_roots_for_slot_range( + req_start_slot, + req_count, + "BlocksByRange", + ) + }, + "get_block_roots_for_slot_range", + ) + .ok_or((RpcErrorResponse::ServerError, "shutting down"))? 
+ .await + .map_err(|_| (RpcErrorResponse::ServerError, "tokio join"))??; let current_slot = self .chain .slot() .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot()); - let log_results = |req: BlocksByRangeRequest, peer_id, blocks_sent| { - if blocks_sent < (*req.count() as usize) { + let log_results = |peer_id, blocks_sent| { + if blocks_sent < (req_count as usize) { debug!( %peer_id, msg = "Failed to return all requested blocks", - start_slot = %req.start_slot(), + start_slot = %req_start_slot, %current_slot, - requested = req.count(), + requested = req_count, returned = blocks_sent, "BlocksByRange outgoing response processed" ); } else { debug!( %peer_id, - start_slot = %req.start_slot(), + start_slot = %req_start_slot, %current_slot, - requested = req.count(), + requested = req_count, returned = blocks_sent, "BlocksByRange outgoing response processed" ); @@ -783,15 +661,13 @@ impl NetworkBeaconProcessor { Ok(Some(block)) => { // Due to skip slots, blocks could be out of the range, we ensure they // are in the range before sending - if block.slot() >= *req.start_slot() - && block.slot() < req.start_slot() + req.count() + if block.slot() >= req_start_slot && block.slot() < req_start_slot + req.count() { blocks_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, + inbound_request_id, response: Response::BlocksByRange(Some(block.clone())), - id: (connection_id, substream_id), }); } } @@ -802,7 +678,7 @@ impl NetworkBeaconProcessor { request_root = ?root, "Block in the chain is not in the store" ); - log_results(req, peer_id, blocks_sent); + log_results(peer_id, blocks_sent); return Err((RpcErrorResponse::ServerError, "Database inconsistency")); } Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => { @@ -811,7 +687,7 @@ impl NetworkBeaconProcessor { reason = "execution layer not synced", "Failed to fetch execution payload for blocks by range request" ); - log_results(req, peer_id, blocks_sent); + 
log_results(peer_id, blocks_sent); // send the stream terminator return Err(( RpcErrorResponse::ResourceUnavailable, @@ -837,38 +713,155 @@ impl NetworkBeaconProcessor { "Error fetching block for peer" ); } - log_results(req, peer_id, blocks_sent); + log_results(peer_id, blocks_sent); // send the stream terminator return Err((RpcErrorResponse::ServerError, "Failed fetching blocks")); } } } - log_results(req, peer_id, blocks_sent); + log_results(peer_id, blocks_sent); Ok(()) } + fn get_block_roots_for_slot_range( + &self, + req_start_slot: u64, + req_count: u64, + req_type: &str, + ) -> Result, (RpcErrorResponse, &'static str)> { + let start_time = std::time::Instant::now(); + let finalized_slot = self + .chain + .canonical_head + .cached_head() + .finalized_checkpoint() + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + let (block_roots, source) = if req_start_slot >= finalized_slot.as_u64() { + // If the entire requested range is after finalization, use fork_choice + ( + self.chain + .block_roots_from_fork_choice(req_start_slot, req_count), + "fork_choice", + ) + } else if req_start_slot + req_count <= finalized_slot.as_u64() { + // If the entire requested range is before finalization, use store + ( + self.get_block_roots_from_store(req_start_slot, req_count)?, + "store", + ) + } else { + // Split the request at the finalization boundary + let count_from_store = finalized_slot.as_u64() - req_start_slot; + let count_from_fork_choice = req_count - count_from_store; + let start_slot_fork_choice = finalized_slot.as_u64(); + + // Get roots from store (up to and including finalized slot) + let mut roots_from_store = + self.get_block_roots_from_store(req_start_slot, count_from_store)?; + + // Get roots from fork choice (after finalized slot) + let roots_from_fork_choice = self + .chain + .block_roots_from_fork_choice(start_slot_fork_choice, count_from_fork_choice); + + roots_from_store.extend(roots_from_fork_choice); + + (roots_from_store, "mixed") + }; + + let 
elapsed = start_time.elapsed(); + metrics::observe_timer_vec( + &metrics::BEACON_PROCESSOR_GET_BLOCK_ROOTS_TIME, + &[source], + elapsed, + ); + + debug!( + req_type, + start_slot = %req_start_slot, + req_count, + roots_count = block_roots.len(), + source, + elapsed = ?elapsed, + %finalized_slot, + "Range request block roots retrieved" + ); + + Ok(block_roots) + } + + /// Get block roots for a `BlocksByRangeRequest` from the store using roots iterator. + fn get_block_roots_from_store( + &self, + start_slot: u64, + count: u64, + ) -> Result, (RpcErrorResponse, &'static str)> { + let forwards_block_root_iter = + match self.chain.forwards_iter_block_roots(Slot::from(start_slot)) { + Ok(iter) => iter, + Err(BeaconChainError::HistoricalBlockOutOfRange { + slot, + oldest_block_slot, + }) => { + debug!( + requested_slot = %slot, + oldest_known_slot = %oldest_block_slot, + "Range request failed during backfill" + ); + return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); + } + Err(e) => { + error!( + %start_slot, + count, + error = ?e, + "Unable to obtain root iter for range request" + ); + return Err((RpcErrorResponse::ServerError, "Database error")); + } + }; + + // Pick out the required blocks, ignoring skip-slots. + let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { + iter.take_while(|(_, slot)| slot.as_u64() < start_slot.saturating_add(count)) + .collect::>() + }); + + let block_roots = match maybe_block_roots { + Ok(block_roots) => block_roots, + Err(e) => { + error!( + %start_slot, + count, + error = ?e, + "Error during iteration over blocks for range request" + ); + return Err((RpcErrorResponse::ServerError, "Iteration error")); + } + }; + + // remove all skip slots i.e. duplicated roots + Ok(block_roots + .into_iter() + .map(|(root, _)| root) + .unique() + .collect::>()) + } + /// Handle a `BlobsByRange` request from the peer. 
pub fn handle_blobs_by_range_request( self: Arc, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: BlobsByRangeRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, - self.handle_blobs_by_range_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - req, - ), + inbound_request_id, + self.handle_blobs_by_range_request_inner(peer_id, inbound_request_id, req), Response::BlobsByRange, ); } @@ -877,9 +870,7 @@ impl NetworkBeaconProcessor { fn handle_blobs_by_range_request_inner( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: BlobsByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!( @@ -926,68 +917,8 @@ impl NetworkBeaconProcessor { }; } - let forwards_block_root_iter = - match self.chain.forwards_iter_block_roots(request_start_slot) { - Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockOutOfRange { - slot, - oldest_block_slot, - }) => { - debug!( - requested_slot = %slot, - oldest_known_slot = %oldest_block_slot, - "Range request failed during backfill" - ); - return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); - } - Err(e) => { - error!( - request = ?req, - %peer_id, - error = ?e, - "Unable to obtain root iter" - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; - - // Use `WhenSlotSkipped::Prev` to get the most recent block root prior to - // `request_start_slot` in order to check whether the `request_start_slot` is a skip. - let mut last_block_root = req.start_slot.checked_sub(1).and_then(|prev_slot| { - self.chain - .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) - .ok() - .flatten() - }); - - // Pick out the required blocks, ignoring skip-slots. 
- let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .collect::>>() - }); - - let block_roots = match maybe_block_roots { - Ok(block_roots) => block_roots, - Err(e) => { - error!( - request = ?req, - %peer_id, - error = ?e, - "Error during iteration over blocks" - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; + let block_roots = + self.get_block_roots_for_slot_range(req.start_slot, req.count, "BlobsByRange")?; let current_slot = self .chain @@ -1005,8 +936,6 @@ impl NetworkBeaconProcessor { ); }; - // remove all skip slots - let block_roots = block_roots.into_iter().flatten(); let mut blobs_sent = 0; for root in block_roots { @@ -1016,9 +945,8 @@ impl NetworkBeaconProcessor { blobs_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, + inbound_request_id, response: Response::BlobsByRange(Some(blob_sidecar.clone())), - request_id, - id: (connection_id, substream_id), }); } } @@ -1048,23 +976,13 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: DataColumnsByRangeRequest, ) { self.terminate_response_stream( peer_id, - connection_id, - substream_id, - request_id, - self.handle_data_columns_by_range_request_inner( - peer_id, - connection_id, - substream_id, - request_id, - req, - ), + inbound_request_id, + self.handle_data_columns_by_range_request_inner(peer_id, inbound_request_id, req), Response::DataColumnsByRange, ); } @@ -1073,9 +991,7 @@ impl NetworkBeaconProcessor { pub fn handle_data_columns_by_range_request_inner( &self, peer_id: PeerId, - 
connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, req: DataColumnsByRangeRequest, ) -> Result<(), (RpcErrorResponse, &'static str)> { debug!( @@ -1131,71 +1047,8 @@ impl NetworkBeaconProcessor { }; } - let forwards_block_root_iter = - match self.chain.forwards_iter_block_roots(request_start_slot) { - Ok(iter) => iter, - Err(BeaconChainError::HistoricalBlockOutOfRange { - slot, - oldest_block_slot, - }) => { - debug!( - requested_slot = %slot, - oldest_known_slot = %oldest_block_slot, - "Range request failed during backfill" - ); - return Err((RpcErrorResponse::ResourceUnavailable, "Backfilling")); - } - Err(e) => { - error!( - request = ?req, - %peer_id, - error = ?e, - "Unable to obtain root iter" - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; - - // Use `WhenSlotSkipped::Prev` to get the most recent block root prior to - // `request_start_slot` in order to check whether the `request_start_slot` is a skip. - let mut last_block_root = req.start_slot.checked_sub(1).and_then(|prev_slot| { - self.chain - .block_root_at_slot(Slot::new(prev_slot), WhenSlotSkipped::Prev) - .ok() - .flatten() - }); - - // Pick out the required blocks, ignoring skip-slots. 
- let maybe_block_roots = process_results(forwards_block_root_iter, |iter| { - iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count)) - // map skip slots to None - .map(|(root, _)| { - let result = if Some(root) == last_block_root { - None - } else { - Some(root) - }; - last_block_root = Some(root); - result - }) - .collect::>>() - }); - - let block_roots = match maybe_block_roots { - Ok(block_roots) => block_roots, - Err(e) => { - error!( - request = ?req, - %peer_id, - error = ?e, - "Error during iteration over blocks" - ); - return Err((RpcErrorResponse::ServerError, "Database error")); - } - }; - - // remove all skip slots - let block_roots = block_roots.into_iter().flatten(); + let block_roots = + self.get_block_roots_for_slot_range(req.start_slot, req.count, "DataColumnsByRange")?; let mut data_columns_sent = 0; for root in block_roots { @@ -1205,11 +1058,10 @@ impl NetworkBeaconProcessor { data_columns_sent += 1; self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, + inbound_request_id, response: Response::DataColumnsByRange(Some( data_column_sidecar.clone(), )), - id: (connection_id, substream_id), }); } Ok(None) => {} // no-op @@ -1252,32 +1104,20 @@ impl NetworkBeaconProcessor { fn terminate_response_single_item Response>( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, result: Result, into_response: F, ) { match result { Ok(resp) => { - // Not necessary to explicitly send a termination message if this InboundRequest - // returns <= 1 for InboundRequest::expected_responses - // https://github.com/sigp/lighthouse/blob/3058b96f2560f1da04ada4f9d8ba8e5651794ff6/beacon_node/lighthouse_network/src/rpc/handler.rs#L555-L558 self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, + inbound_request_id, response: into_response(resp), - id: (connection_id, substream_id), }); } 
Err((error_code, reason)) => { - self.send_error_response( - peer_id, - error_code, - reason, - (connection_id, substream_id), - request_id, - ); + self.send_error_response(peer_id, error_code, reason, inbound_request_id); } } } @@ -1287,27 +1127,18 @@ impl NetworkBeaconProcessor { fn terminate_response_stream) -> Response>( &self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, result: Result<(), (RpcErrorResponse, &'static str)>, into_response: F, ) { match result { Ok(_) => self.send_network_message(NetworkMessage::SendResponse { peer_id, - request_id, + inbound_request_id, response: into_response(None), - id: (connection_id, substream_id), }), Err((error_code, reason)) => { - self.send_error_response( - peer_id, - error_code, - reason.into(), - (connection_id, substream_id), - request_id, - ); + self.send_error_response(peer_id, error_code, reason.into(), inbound_request_id); } } } diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 69ba5c1dbd..292e894870 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -9,14 +9,16 @@ use crate::{ sync::{manager::BlockProcessType, SyncMessage}, }; use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; use beacon_chain::test_utils::{ - test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, + get_kzg, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, + EphemeralHarnessType, }; use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; -use lighthouse_network::discovery::ConnectionId; +use itertools::Itertools; use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MetaDataV3}; -use 
lighthouse_network::rpc::{RequestId, SubstreamId}; +use lighthouse_network::rpc::InboundRequestId; use lighthouse_network::{ discv5::enr::{self, CombinedKey}, rpc::methods::{MetaData, MetaDataV2}, @@ -30,9 +32,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, Hash256, MainnetEthSpec, - ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, - SubnetId, + Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, DataColumnSidecarList, + DataColumnSubnetId, Epoch, Hash256, MainnetEthSpec, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, }; type E = MainnetEthSpec; @@ -53,6 +55,7 @@ struct TestRig { chain: Arc>, next_block: Arc>, next_blobs: Option>, + next_data_columns: Option>, attestations: Vec<(Attestation, SubnetId)>, next_block_attestations: Vec<(Attestation, SubnetId)>, next_block_aggregate_attestations: Vec>, @@ -242,7 +245,7 @@ impl TestRig { let network_beacon_processor = Arc::new(network_beacon_processor); let beacon_processor = BeaconProcessor { - network_globals, + network_globals: network_globals.clone(), executor, current_workers: 0, config: beacon_processor_config, @@ -263,15 +266,36 @@ impl TestRig { assert!(beacon_processor.is_ok()); let block = next_block_tuple.0; - let blob_sidecars = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { - Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap()) + let (blob_sidecars, data_columns) = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { + if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + let kzg = get_kzg(&chain.spec); + let custody_columns: DataColumnSidecarList = blobs_to_data_column_sidecars( + &blobs.iter().collect_vec(), + kzg_proofs.clone().into_iter().collect_vec(), + &block, + &kzg, + &chain.spec, + ) + .unwrap() + .into_iter() + 
.filter(|c| network_globals.sampling_columns.contains(&c.index)) + .collect::>(); + + (None, Some(custody_columns)) + } else { + let blob_sidecars = + BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap(); + (Some(blob_sidecars), None) + } } else { - None + (None, None) }; + Self { chain, next_block: block, next_blobs: blob_sidecars, + next_data_columns: data_columns, attestations, next_block_attestations, next_block_aggregate_attestations, @@ -324,12 +348,38 @@ impl TestRig { } } + pub fn enqueue_gossip_data_columns(&self, col_index: usize) { + if let Some(data_columns) = self.next_data_columns.as_ref() { + let data_column = data_columns.get(col_index).unwrap(); + self.network_beacon_processor + .send_gossip_data_column_sidecar( + junk_message_id(), + junk_peer_id(), + Client::default(), + DataColumnSubnetId::from_column_index(data_column.index, &self.chain.spec), + data_column.clone(), + Duration::from_secs(0), + ) + .unwrap(); + } + } + + pub fn custody_columns_count(&self) -> usize { + self.network_beacon_processor + .network_globals + .custody_columns_count() as usize + } + pub fn enqueue_rpc_block(&self) { let block_root = self.next_block.canonical_root(); self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), + RpcBlock::new_without_blobs( + Some(block_root), + self.next_block.clone(), + self.custody_columns_count(), + ), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 0 }, ) @@ -341,7 +391,11 @@ impl TestRig { self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), + RpcBlock::new_without_blobs( + Some(block_root), + self.next_block.clone(), + self.custody_columns_count(), + ), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) @@ -362,13 +416,24 @@ impl TestRig { } } + pub fn enqueue_single_lookup_rpc_data_columns(&self) { + 
if let Some(data_columns) = self.next_data_columns.clone() { + self.network_beacon_processor + .send_rpc_custody_columns( + self.next_block.canonical_root(), + data_columns, + Duration::default(), + BlockProcessType::SingleCustodyColumn(1), + ) + .unwrap(); + } + } + pub fn enqueue_blobs_by_range_request(&self, count: u64) { self.network_beacon_processor .send_blobs_by_range_request( PeerId::random(), - ConnectionId::new_unchecked(42), - SubstreamId::new(24), - RequestId::new_unchecked(0), + InboundRequestId::new_unchecked(42, 24), BlobsByRangeRequest { start_slot: 0, count, @@ -621,6 +686,13 @@ async fn import_gossip_block_acceptably_early() { .await; } + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); + for i in 0..num_data_columns { + rig.enqueue_gossip_data_columns(i); + rig.assert_event_journal_completes(&[WorkType::GossipDataColumnSidecar]) + .await; + } + // Note: this section of the code is a bit race-y. We're assuming that we can set the slot clock // and check the head in the time between the block arrived early and when its due for // processing. 
@@ -697,19 +769,20 @@ async fn import_gossip_block_at_current_slot() { rig.assert_event_journal_completes(&[WorkType::GossipBlock]) .await; - let num_blobs = rig - .next_blobs - .as_ref() - .map(|blobs| blobs.len()) - .unwrap_or(0); - + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); for i in 0..num_blobs { rig.enqueue_gossip_blob(i); - rig.assert_event_journal_completes(&[WorkType::GossipBlobSidecar]) .await; } + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); + for i in 0..num_data_columns { + rig.enqueue_gossip_data_columns(i); + rig.assert_event_journal_completes(&[WorkType::GossipDataColumnSidecar]) + .await; + } + assert_eq!( rig.head_root(), rig.next_block.canonical_root(), @@ -762,11 +835,8 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod ); // Send the block and ensure that the attestation is received back and imported. - let num_blobs = rig - .next_blobs - .as_ref() - .map(|blobs| blobs.len()) - .unwrap_or(0); + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); let mut events = vec![]; match import_method { BlockImportMethod::Gossip => { @@ -776,6 +846,10 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod rig.enqueue_gossip_blob(i); events.push(WorkType::GossipBlobSidecar); } + for i in 0..num_data_columns { + rig.enqueue_gossip_data_columns(i); + events.push(WorkType::GossipDataColumnSidecar); + } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); @@ -784,6 +858,10 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod rig.enqueue_single_lookup_rpc_blobs(); events.push(WorkType::RpcBlobs); } + if num_data_columns > 0 { + rig.enqueue_single_lookup_rpc_data_columns(); + events.push(WorkType::RpcCustodyColumn); + } } }; @@ -843,11 +921,8 @@ async fn 
aggregate_attestation_to_unknown_block(import_method: BlockImportMethod ); // Send the block and ensure that the attestation is received back and imported. - let num_blobs = rig - .next_blobs - .as_ref() - .map(|blobs| blobs.len()) - .unwrap_or(0); + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); let mut events = vec![]; match import_method { BlockImportMethod::Gossip => { @@ -857,6 +932,10 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod rig.enqueue_gossip_blob(i); events.push(WorkType::GossipBlobSidecar); } + for i in 0..num_data_columns { + rig.enqueue_gossip_data_columns(i); + events.push(WorkType::GossipDataColumnSidecar) + } } BlockImportMethod::Rpc => { rig.enqueue_rpc_block(); @@ -865,6 +944,10 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod rig.enqueue_single_lookup_rpc_blobs(); events.push(WorkType::RpcBlobs); } + if num_data_columns > 0 { + rig.enqueue_single_lookup_rpc_data_columns(); + events.push(WorkType::RpcCustodyColumn); + } } }; @@ -1049,12 +1132,20 @@ async fn test_rpc_block_reprocessing() { rig.assert_event_journal_completes(&[WorkType::RpcBlock]) .await; - rig.enqueue_single_lookup_rpc_blobs(); - if rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0) > 0 { + let num_blobs = rig.next_blobs.as_ref().map(|b| b.len()).unwrap_or(0); + if num_blobs > 0 { + rig.enqueue_single_lookup_rpc_blobs(); rig.assert_event_journal_completes(&[WorkType::RpcBlobs]) .await; } + let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); + if num_data_columns > 0 { + rig.enqueue_single_lookup_rpc_data_columns(); + rig.assert_event_journal_completes(&[WorkType::RpcCustodyColumn]) + .await; + } + // next_block shouldn't be processed since it couldn't get the // duplicate cache handle assert_ne!(next_block_root, rig.head_root()); @@ -1149,8 +1240,7 @@ async fn 
test_blobs_by_range() { if let NetworkMessage::SendResponse { peer_id: _, response: Response::BlobsByRange(blob), - id: _, - request_id: _, + inbound_request_id: _, } = next { if blob.is_some() { diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 7376244501..05c00b76af 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -14,12 +14,10 @@ use beacon_processor::{ work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend, DuplicateCache, }; use futures::prelude::*; -use lighthouse_network::discovery::ConnectionId; use lighthouse_network::rpc::*; use lighthouse_network::{ - rpc, service::api_types::{AppRequestId, SyncRequestId}, - MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Response, + MessageId, NetworkGlobals, PeerId, PubsubMessage, Response, }; use logging::crit; use logging::TimeLatch; @@ -54,19 +52,19 @@ pub enum RouterMessage { /// An RPC request has been received. RPCRequestReceived { peer_id: PeerId, - id: PeerRequestId, - request: rpc::Request, + inbound_request_id: InboundRequestId, + request_type: RequestType, }, /// An RPC response has been received. RPCResponseReceived { peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, response: Response, }, /// An RPC request failed RPCFailed { peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, error: RPCError, }, /// A gossip message has been received. 
The fields are: message id, the peer that sent us this @@ -159,24 +157,24 @@ impl Router { } RouterMessage::RPCRequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { - self.handle_rpc_request(peer_id, id, request); + self.handle_rpc_request(peer_id, inbound_request_id, request_type); } RouterMessage::RPCResponseReceived { peer_id, - request_id, + app_request_id, response, } => { - self.handle_rpc_response(peer_id, request_id, response); + self.handle_rpc_response(peer_id, app_request_id, response); } RouterMessage::RPCFailed { peer_id, - request_id, + app_request_id, error, } => { - self.on_rpc_error(peer_id, request_id, error); + self.on_rpc_error(peer_id, app_request_id, error); } RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => { self.handle_gossip(id, peer_id, gossip, should_process); @@ -190,23 +188,18 @@ impl Router { fn handle_rpc_request( &mut self, peer_id: PeerId, - request_id: PeerRequestId, - rpc_request: rpc::Request, + inbound_request_id: InboundRequestId, // Use ResponseId here + request_type: RequestType, ) { if !self.network_globals.peers.read().is_connected(&peer_id) { - debug!( %peer_id, request = ?rpc_request, "Dropping request of disconnected peer"); + debug!(%peer_id, request = ?request_type, "Dropping request of disconnected peer"); return; } - match rpc_request.r#type { - RequestType::Status(status_message) => self.on_status_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - status_message, - ), + match request_type { + RequestType::Status(status_message) => { + self.on_status_request(peer_id, inbound_request_id, status_message) + } RequestType::BlocksByRange(request) => { - // return just one block in case the step parameter is used. 
https://github.com/ethereum/consensus-specs/pull/2856 let mut count = *request.count(); if *request.step() > 1 { count = 1; @@ -223,9 +216,7 @@ impl Router { self.handle_beacon_processor_send_result( self.network_beacon_processor.send_blocks_by_range_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, blocks_request, ), ) @@ -233,86 +224,50 @@ impl Router { RequestType::BlocksByRoot(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor.send_blocks_by_roots_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, request, ), ), RequestType::BlobsByRange(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor.send_blobs_by_range_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, request, ), ), RequestType::BlobsByRoot(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor.send_blobs_by_roots_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, request, ), ), RequestType::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_roots_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - request, - ), + .send_data_columns_by_roots_request(peer_id, inbound_request_id, request), ), RequestType::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_data_columns_by_range_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - request, - ), + .send_data_columns_by_range_request(peer_id, inbound_request_id, request), ), RequestType::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_bootstrap_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - request, - ), + 
.send_light_client_bootstrap_request(peer_id, inbound_request_id, request), ), RequestType::LightClientOptimisticUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_optimistic_update_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - ), + .send_light_client_optimistic_update_request(peer_id, inbound_request_id), ), RequestType::LightClientFinalityUpdate => self.handle_beacon_processor_send_result( self.network_beacon_processor - .send_light_client_finality_update_request( - peer_id, - request_id.0, - request_id.1, - rpc_request.id, - ), + .send_light_client_finality_update_request(peer_id, inbound_request_id), ), RequestType::LightClientUpdatesByRange(request) => self .handle_beacon_processor_send_result( self.network_beacon_processor .send_light_client_updates_by_range_request( peer_id, - request_id.0, - request_id.1, - rpc_request.id, + inbound_request_id, request, ), ), @@ -324,7 +279,7 @@ impl Router { fn handle_rpc_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, response: Response, ) { match response { @@ -336,22 +291,22 @@ impl Router { ) } Response::BlocksByRange(beacon_block) => { - self.on_blocks_by_range_response(peer_id, request_id, beacon_block); + self.on_blocks_by_range_response(peer_id, app_request_id, beacon_block); } Response::BlocksByRoot(beacon_block) => { - self.on_blocks_by_root_response(peer_id, request_id, beacon_block); + self.on_blocks_by_root_response(peer_id, app_request_id, beacon_block); } Response::BlobsByRange(blob) => { - self.on_blobs_by_range_response(peer_id, request_id, blob); + self.on_blobs_by_range_response(peer_id, app_request_id, blob); } Response::BlobsByRoot(blob) => { - self.on_blobs_by_root_response(peer_id, request_id, blob); + self.on_blobs_by_root_response(peer_id, app_request_id, blob); } Response::DataColumnsByRoot(data_column) => { - self.on_data_columns_by_root_response(peer_id, request_id, 
data_column); + self.on_data_columns_by_root_response(peer_id, app_request_id, data_column); } Response::DataColumnsByRange(data_column) => { - self.on_data_columns_by_range_response(peer_id, request_id, data_column); + self.on_data_columns_by_range_response(peer_id, app_request_id, data_column); } // Light client responses should not be received Response::LightClientBootstrap(_) @@ -563,12 +518,12 @@ impl Router { /// An error occurred during an RPC request. The state is maintained by the sync manager, so /// this function notifies the sync manager of the error. - pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: AppRequestId, error: RPCError) { + pub fn on_rpc_error(&mut self, peer_id: PeerId, app_request_id: AppRequestId, error: RPCError) { // Check if the failed RPC belongs to sync - if let AppRequestId::Sync(request_id) = request_id { + if let AppRequestId::Sync(sync_request_id) = app_request_id { self.send_to_sync(SyncMessage::RpcError { peer_id, - request_id, + sync_request_id, error, }); } @@ -580,9 +535,7 @@ impl Router { pub fn on_status_request( &mut self, peer_id: PeerId, - connection_id: ConnectionId, - substream_id: SubstreamId, - request_id: RequestId, + inbound_request_id: InboundRequestId, // Use ResponseId here status: StatusMessage, ) { debug!(%peer_id, ?status, "Received Status Request"); @@ -590,9 +543,8 @@ impl Router { // Say status back. 
self.network.send_response( peer_id, + inbound_request_id, Response::Status(status_message(&self.chain)), - (connection_id, substream_id), - request_id, ); self.handle_beacon_processor_send_result( @@ -606,11 +558,11 @@ impl Router { pub fn on_blocks_by_range_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, beacon_block: Option>>, ) { - let request_id = match request_id { - AppRequestId::Sync(sync_id) => match sync_id { + let sync_request_id = match app_request_id { + AppRequestId::Sync(sync_request_id) => match sync_request_id { id @ SyncRequestId::BlocksByRange { .. } => id, other => { crit!(request = ?other, "BlocksByRange response on incorrect request"); @@ -621,6 +573,7 @@ impl Router { crit!(%peer_id, "All BBRange requests belong to sync"); return; } + AppRequestId::Internal => unreachable!("Handled internally"), }; trace!( @@ -631,7 +584,7 @@ impl Router { self.send_to_sync(SyncMessage::RpcBlock { peer_id, - request_id, + sync_request_id, beacon_block, seen_timestamp: timestamp_now(), }); @@ -640,7 +593,7 @@ impl Router { pub fn on_blobs_by_range_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, blob_sidecar: Option>>, ) { trace!( @@ -648,10 +601,10 @@ impl Router { "Received BlobsByRange Response" ); - if let AppRequestId::Sync(id) = request_id { + if let AppRequestId::Sync(sync_request_id) = app_request_id { self.send_to_sync(SyncMessage::RpcBlob { peer_id, - request_id: id, + sync_request_id, blob_sidecar, seen_timestamp: timestamp_now(), }); @@ -664,10 +617,10 @@ impl Router { pub fn on_blocks_by_root_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, beacon_block: Option>>, ) { - let request_id = match request_id { + let sync_request_id = match app_request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlock { .. 
} => id, other => { @@ -679,6 +632,7 @@ impl Router { crit!(%peer_id, "All BBRoot requests belong to sync"); return; } + AppRequestId::Internal => unreachable!("Handled internally"), }; trace!( @@ -687,7 +641,7 @@ impl Router { ); self.send_to_sync(SyncMessage::RpcBlock { peer_id, - request_id, + sync_request_id, beacon_block, seen_timestamp: timestamp_now(), }); @@ -697,10 +651,10 @@ impl Router { pub fn on_blobs_by_root_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, blob_sidecar: Option>>, ) { - let request_id = match request_id { + let sync_request_id = match app_request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlob { .. } => id, other => { @@ -712,6 +666,7 @@ impl Router { crit!(%peer_id, "All BlobsByRoot requests belong to sync"); return; } + AppRequestId::Internal => unreachable!("Handled internally"), }; trace!( @@ -719,7 +674,7 @@ impl Router { "Received BlobsByRoot Response" ); self.send_to_sync(SyncMessage::RpcBlob { - request_id, + sync_request_id, peer_id, blob_sidecar, seen_timestamp: timestamp_now(), @@ -730,10 +685,10 @@ impl Router { pub fn on_data_columns_by_root_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, data_column: Option>>, ) { - let request_id = match request_id { + let sync_request_id = match app_request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::DataColumnsByRoot { .. 
} => id, other => { @@ -745,6 +700,7 @@ impl Router { crit!(%peer_id, "All DataColumnsByRoot requests belong to sync"); return; } + AppRequestId::Internal => unreachable!("Handled internally"), }; trace!( @@ -752,7 +708,7 @@ impl Router { "Received DataColumnsByRoot Response" ); self.send_to_sync(SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column, seen_timestamp: timestamp_now(), @@ -762,7 +718,7 @@ impl Router { pub fn on_data_columns_by_range_response( &mut self, peer_id: PeerId, - request_id: AppRequestId, + app_request_id: AppRequestId, data_column: Option>>, ) { trace!( @@ -770,10 +726,10 @@ impl Router { "Received DataColumnsByRange Response" ); - if let AppRequestId::Sync(id) = request_id { + if let AppRequestId::Sync(sync_request_id) = app_request_id { self.send_to_sync(SyncMessage::RpcDataColumn { peer_id, - request_id: id, + sync_request_id, data_column, seen_timestamp: timestamp_now(), }); @@ -824,7 +780,7 @@ impl HandlerNetworkContext { pub fn send_processor_request(&mut self, peer_id: PeerId, request: RequestType) { self.inform_network(NetworkMessage::SendRequest { peer_id, - request_id: AppRequestId::Router, + app_request_id: AppRequestId::Router, request, }) } @@ -833,14 +789,12 @@ impl HandlerNetworkContext { pub fn send_response( &mut self, peer_id: PeerId, + inbound_request_id: InboundRequestId, response: Response, - id: PeerRequestId, - request_id: RequestId, ) { self.inform_network(NetworkMessage::SendResponse { - request_id, peer_id, - id, + inbound_request_id, response, }) } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 778ac63290..7afd62ab2e 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -10,14 +10,15 @@ use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconPro use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; -use lighthouse_network::rpc::{RequestId, 
RequestType}; +use lighthouse_network::rpc::InboundRequestId; +use lighthouse_network::rpc::RequestType; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; use lighthouse_network::Enr; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RpcErrorResponse}, - Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Response, Subnet, + Context, PeerAction, PubsubMessage, ReportSource, Response, Subnet, }; use lighthouse_network::{ service::api_types::AppRequestId, @@ -61,22 +62,20 @@ pub enum NetworkMessage { SendRequest { peer_id: PeerId, request: RequestType, - request_id: AppRequestId, + app_request_id: AppRequestId, }, /// Send a successful Response to the libp2p service. SendResponse { peer_id: PeerId, - request_id: RequestId, + inbound_request_id: InboundRequestId, response: Response, - id: PeerRequestId, }, /// Sends an error response to an RPC request. SendErrorResponse { peer_id: PeerId, - request_id: RequestId, + inbound_request_id: InboundRequestId, error: RpcErrorResponse, reason: String, - id: PeerRequestId, }, /// Publish a list of messages to the gossipsub protocol. 
Publish { messages: Vec> }, @@ -488,30 +487,34 @@ impl NetworkService { } NetworkEvent::RequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, } => { self.send_to_router(RouterMessage::RPCRequestReceived { peer_id, - id, - request, + inbound_request_id, + request_type, }); } NetworkEvent::ResponseReceived { peer_id, - id, + app_request_id, response, } => { self.send_to_router(RouterMessage::RPCResponseReceived { peer_id, - request_id: id, + app_request_id, response, }); } - NetworkEvent::RPCFailed { id, peer_id, error } => { + NetworkEvent::RPCFailed { + app_request_id, + peer_id, + error, + } => { self.send_to_router(RouterMessage::RPCFailed { peer_id, - request_id: id, + app_request_id, error, }); } @@ -601,35 +604,34 @@ impl NetworkService { NetworkMessage::SendRequest { peer_id, request, - request_id, + app_request_id, } => { - if let Err((request_id, error)) = - self.libp2p.send_request(peer_id, request_id, request) + if let Err((app_request_id, error)) = + self.libp2p.send_request(peer_id, app_request_id, request) { self.send_to_router(RouterMessage::RPCFailed { peer_id, - request_id, + app_request_id, error, }); } } NetworkMessage::SendResponse { peer_id, + inbound_request_id, response, - id, - request_id, } => { - self.libp2p.send_response(peer_id, id, request_id, response); + self.libp2p + .send_response(peer_id, inbound_request_id, response); } NetworkMessage::SendErrorResponse { peer_id, error, - id, - request_id, + inbound_request_id, reason, } => { self.libp2p - .send_error_response(peer_id, id, request_id, error, reason); + .send_error_response(peer_id, inbound_request_id, error, reason); } NetworkMessage::ValidationResult { propagation_source, diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 7e274850b5..7fdf9047fc 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -7,6 +7,8 
@@ use beacon_chain::{ }; use genesis::{generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::NetworkConfig; +use rand::rngs::StdRng; +use rand::SeedableRng; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::sync::{Arc, LazyLock}; use std::time::{Duration, SystemTime}; @@ -76,6 +78,7 @@ impl TestBeaconChain { Duration::from_millis(SLOT_DURATION_MILLIS), )) .shutdown_sender(shutdown_tx) + .rng(Box::new(StdRng::seed_from_u64(42))) .build() .expect("should build"), ); @@ -116,18 +119,16 @@ fn get_subnet_service() -> SubnetService { ) } -// gets a number of events from the subscription service, or returns none if it times out after a number -// of slots -async fn get_events + Unpin>( +// gets a number of events from the subscription service, or returns none if it times out after a +// specified duration. +async fn get_events_until_timeout + Unpin>( stream: &mut S, num_events: Option, - num_slots_before_timeout: u32, + timeout: Duration, ) -> Vec { let mut events = Vec::new(); - - let timeout = - tokio::time::sleep(Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout); - futures::pin_mut!(timeout); + let sleep = tokio::time::sleep(timeout); + futures::pin_mut!(sleep); loop { tokio::select! 
{ @@ -139,7 +140,7 @@ async fn get_events + Unpin>( } } } - _ = timeout.as_mut() => { + _ = sleep.as_mut() => { break; } @@ -149,6 +150,17 @@ async fn get_events + Unpin>( events } +// gets a number of events from the subscription service, or returns none if it times out after a number +// of slots +async fn get_events_until_num_slots + Unpin>( + stream: &mut S, + num_events: Option, + num_slots_before_timeout: u32, +) -> Vec { + let timeout = Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout; + get_events_until_timeout(stream, num_events, timeout).await +} + mod test { #[cfg(not(windows))] @@ -196,7 +208,7 @@ mod test { // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); - let _events = get_events(&mut subnet_service, None, 1).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 1).await; let current_slot = subnet_service .beacon_chain @@ -249,7 +261,7 @@ mod test { ]; // Wait for 1 slot duration to get the unsubscription event - let events = get_events( + let events = get_events_until_num_slots( &mut subnet_service, Some(2), (MainnetEthSpec::slots_per_epoch()) as u32, @@ -281,7 +293,7 @@ mod test { // create the subnet service and subscriptions let mut subnet_service = get_subnet_service(); - let _events = get_events(&mut subnet_service, None, 0).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 0).await; let current_slot = subnet_service .beacon_chain .slot_clock @@ -330,14 +342,14 @@ mod test { if subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { // If we are permanently subscribed to this subnet, we won't see a subscribe message - let _ = get_events(&mut subnet_service, None, 1).await; + let _ = get_events_until_num_slots(&mut subnet_service, None, 1).await; } else { - let subscription = get_events(&mut subnet_service, None, 1).await; + let subscription = get_events_until_num_slots(&mut subnet_service, None, 1).await; 
assert_eq!(subscription, [expected]); } // Get event for 1 more slot duration, we should get the unsubscribe event now. - let unsubscribe_event = get_events(&mut subnet_service, None, 1).await; + let unsubscribe_event = get_events_until_num_slots(&mut subnet_service, None, 1).await; // If the long lived and short lived subnets are different, we should get an unsubscription // event. @@ -376,7 +388,7 @@ mod test { // submit the subscriptions subnet_service.validator_subscriptions(subscriptions.into_iter()); - let events = get_events(&mut subnet_service, Some(130), 10).await; + let events = get_events_until_num_slots(&mut subnet_service, Some(130), 10).await; let mut discover_peer_count = 0; let mut enr_add_count = 0; let mut unsubscribe_event_count = 0; @@ -445,7 +457,7 @@ mod test { // submit the subscriptions subnet_service.validator_subscriptions(subscriptions.into_iter()); - let events = get_events(&mut subnet_service, None, 3).await; + let events = get_events_until_num_slots(&mut subnet_service, None, 3).await; let mut discover_peer_count = 0; let mut enr_add_count = 0; let mut unexpected_msg_count = 0; @@ -495,7 +507,7 @@ mod test { // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); // Remove permanent events - let _events = get_events(&mut subnet_service, None, 0).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 0).await; let current_slot = subnet_service .beacon_chain @@ -560,7 +572,7 @@ mod test { // Unsubscription event should happen at the end of the slot. 
// We wait for 2 slots, to avoid timeout issues - let events = get_events(&mut subnet_service, None, 2).await; + let events = get_events_until_num_slots(&mut subnet_service, None, 2).await; let expected_subscription = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1)); @@ -577,28 +589,26 @@ mod test { println!("{events:?}"); let subscription_slot = current_slot + subscription_slot2 - 1; // one less do to the // advance subscription time - let wait_slots = subnet_service + let wait_duration = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_slot) - .unwrap() - .as_millis() as u64 - / SLOT_DURATION_MILLIS; + .unwrap(); - let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); + let no_events = + dbg!(get_events_until_timeout(&mut subnet_service, None, wait_duration).await); assert_eq!(no_events, []); let subscription_end_slot = current_slot + subscription_slot2 + 2; // +1 to get to the end of the duty slot, +1 for the slot to complete - let wait_slots = subnet_service + let wait_duration = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_end_slot) - .unwrap() - .as_millis() as u64 - / SLOT_DURATION_MILLIS; + .unwrap(); - let second_subscribe_event = get_events(&mut subnet_service, None, wait_slots as u32).await; + let second_subscribe_event = + get_events_until_timeout(&mut subnet_service, None, wait_duration).await; // If the permanent and short lived subnets are different, we should get an unsubscription event. 
if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!( @@ -612,28 +622,26 @@ mod test { let subscription_slot = current_slot + subscription_slot3 - 1; - let wait_slots = subnet_service + let wait_duration = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_slot) - .unwrap() - .as_millis() as u64 - / SLOT_DURATION_MILLIS; + .unwrap(); - let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); + let no_events = + dbg!(get_events_until_timeout(&mut subnet_service, None, wait_duration).await); assert_eq!(no_events, []); let subscription_end_slot = current_slot + subscription_slot3 + 2; // +1 to get to the end of the duty slot, +1 for the slot to complete - let wait_slots = subnet_service + let wait_duration = subnet_service .beacon_chain .slot_clock .duration_to_slot(subscription_end_slot) - .unwrap() - .as_millis() as u64 - / SLOT_DURATION_MILLIS; + .unwrap(); - let third_subscribe_event = get_events(&mut subnet_service, None, wait_slots as u32).await; + let third_subscribe_event = + get_events_until_timeout(&mut subnet_service, None, wait_duration).await; if !subnet_service.is_subscribed_permanent(&Subnet::Attestation(subnet_id1)) { assert_eq!( @@ -652,7 +660,7 @@ mod test { // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); - let _events = get_events(&mut subnet_service, None, 0).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 0).await; let subscriptions = std::iter::once(Subscription::SyncCommittee(SyncCommitteeSubscription { @@ -673,7 +681,7 @@ mod test { let subnet_id = subnet_ids.iter().next().unwrap(); // Note: the unsubscription event takes 2 epochs (8 * 2 * 0.4 secs = 3.2 secs) - let events = get_events( + let events = get_events_until_num_slots( &mut subnet_service, Some(5), (MainnetEthSpec::slots_per_epoch() * 3) as u32, // Have some buffer time before getting 5 events @@ -709,7 +717,7 @@ mod 
test { // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); // Get the initial events from permanent subnet subscriptions - let _events = get_events(&mut subnet_service, None, 1).await; + let _events = get_events_until_num_slots(&mut subnet_service, None, 1).await; let subscriptions = std::iter::once(Subscription::SyncCommittee(SyncCommitteeSubscription { @@ -722,7 +730,7 @@ mod test { subnet_service.validator_subscriptions(subscriptions); // Get all immediate events (won't include unsubscriptions) - let events = get_events(&mut subnet_service, None, 1).await; + let events = get_events_until_num_slots(&mut subnet_service, None, 1).await; matches::assert_matches!( events[..], [ @@ -752,7 +760,7 @@ mod test { subnet_service.validator_subscriptions(subscriptions.into_iter()); // Get all immediate events (won't include unsubscriptions) - let events = get_events(&mut subnet_service, None, 1).await; + let events = get_events_until_num_slots(&mut subnet_service, None, 1).await; matches::assert_matches!(events[..], [SubnetServiceMessage::DiscoverPeers(_),]); // Should be unsubscribed at the end. diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 8eefb2d675..86b6894bac 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -6,7 +6,6 @@ use crate::sync::block_lookups::{ }; use crate::sync::manager::BlockProcessType; use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use lighthouse_network::service::api_types::Id; use parking_lot::RwLock; @@ -97,13 +96,8 @@ impl RequestState for BlockRequestState { seen_timestamp, .. 
} = download_result; - cx.send_block_for_processing( - id, - block_root, - RpcBlock::new_without_blobs(Some(block_root), value), - seen_timestamp, - ) - .map_err(LookupRequestError::SendFailedProcessor) + cx.send_block_for_processing(id, block_root, value, seen_timestamp) + .map_err(LookupRequestError::SendFailedProcessor) } fn response_type() -> ResponseType { diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index ef9285c8dc..99428b0c80 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -266,7 +266,8 @@ impl RangeBlockComponentsRequest { ) .map_err(|e| format!("{e:?}"))? } else { - RpcBlock::new_without_blobs(Some(block_root), block) + // Block has no data, expects zero columns + RpcBlock::new_without_blobs(Some(block_root), block, 0) }); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 9a48e9aa5d..84e492c04f 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -108,7 +108,7 @@ pub enum SyncMessage { /// A block has been received from the RPC. RpcBlock { - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, beacon_block: Option>>, seen_timestamp: Duration, @@ -116,7 +116,7 @@ pub enum SyncMessage { /// A blob has been received from the RPC. RpcBlob { - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, blob_sidecar: Option>>, seen_timestamp: Duration, @@ -124,7 +124,7 @@ pub enum SyncMessage { /// A data columns has been received from the RPC RpcDataColumn { - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, data_column: Option>>, seen_timestamp: Duration, @@ -153,7 +153,7 @@ pub enum SyncMessage { /// An RPC Error has occurred on a request. 
RpcError { peer_id: PeerId, - request_id: SyncRequestId, + sync_request_id: SyncRequestId, error: RPCError, }, @@ -477,9 +477,9 @@ impl SyncManager { } /// Handles RPC errors related to requests that were emitted from the sync manager. - fn inject_error(&mut self, peer_id: PeerId, request_id: SyncRequestId, error: RPCError) { + fn inject_error(&mut self, peer_id: PeerId, sync_request_id: SyncRequestId, error: RPCError) { trace!("Sync manager received a failed RPC"); - match request_id { + match sync_request_id { SyncRequestId::SingleBlock { id } => { self.on_single_block_response(id, peer_id, RpcEvent::RPCError(error)) } @@ -509,8 +509,8 @@ impl SyncManager { fn peer_disconnect(&mut self, peer_id: &PeerId) { // Inject a Disconnected error on all requests associated with the disconnected peer // to retry all batches/lookups - for request_id in self.network.peer_disconnected(peer_id) { - self.inject_error(*peer_id, request_id, RPCError::Disconnected); + for sync_request_id in self.network.peer_disconnected(peer_id) { + self.inject_error(*peer_id, sync_request_id, RPCError::Disconnected); } // Remove peer from all data structures @@ -685,7 +685,7 @@ impl SyncManager { if new_state.is_synced() && !matches!( old_state, - SyncState::Synced { .. } | SyncState::BackFillSyncing { .. } + SyncState::Synced | SyncState::BackFillSyncing { .. 
} ) { self.network.subscribe_core_topics(); @@ -751,25 +751,27 @@ impl SyncManager { self.add_peers_force_range_sync(&peers, head_root, head_slot); } SyncMessage::RpcBlock { - request_id, + sync_request_id, peer_id, beacon_block, seen_timestamp, } => { - self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp); + self.rpc_block_received(sync_request_id, peer_id, beacon_block, seen_timestamp); } SyncMessage::RpcBlob { - request_id, + sync_request_id, peer_id, blob_sidecar, seen_timestamp, - } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp), + } => self.rpc_blob_received(sync_request_id, peer_id, blob_sidecar, seen_timestamp), SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column, seen_timestamp, - } => self.rpc_data_column_received(request_id, peer_id, data_column, seen_timestamp), + } => { + self.rpc_data_column_received(sync_request_id, peer_id, data_column, seen_timestamp) + } SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -845,9 +847,9 @@ impl SyncManager { } SyncMessage::RpcError { peer_id, - request_id, + sync_request_id, error, - } => self.inject_error(peer_id, request_id, error), + } => self.inject_error(peer_id, sync_request_id, error), SyncMessage::BlockComponentProcessed { process_type, result, @@ -1018,12 +1020,12 @@ impl SyncManager { fn rpc_block_received( &mut self, - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, block: Option>>, seen_timestamp: Duration, ) { - match request_id { + match sync_request_id { SyncRequestId::SingleBlock { id } => self.on_single_block_response( id, peer_id, @@ -1060,12 +1062,12 @@ impl SyncManager { fn rpc_blob_received( &mut self, - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, blob: Option>>, seen_timestamp: Duration, ) { - match request_id { + match sync_request_id { SyncRequestId::SingleBlob { 
id } => self.on_single_blob_response( id, peer_id, @@ -1084,12 +1086,12 @@ impl SyncManager { fn rpc_data_column_received( &mut self, - request_id: SyncRequestId, + sync_request_id: SyncRequestId, peer_id: PeerId, data_column: Option>>, seen_timestamp: Duration, ) { - match request_id { + match sync_request_id { SyncRequestId::DataColumnsByRoot(req_id) => { self.on_data_columns_by_root_response( req_id, diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 16fcf93bcf..2cb5ec9a0a 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -372,11 +372,11 @@ impl SyncNetworkContext { ); let request = RequestType::Status(status_message.clone()); - let request_id = AppRequestId::Router; + let app_request_id = AppRequestId::Router; let _ = self.send_network_msg(NetworkMessage::SendRequest { peer_id, request, - request_id, + app_request_id, }); } } @@ -595,7 +595,7 @@ impl SyncNetworkContext { .send(NetworkMessage::SendRequest { peer_id, request: RequestType::BlocksByRoot(request.into_request(&self.fork_context)), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -684,7 +684,7 @@ impl SyncNetworkContext { .send(NetworkMessage::SendRequest { peer_id, request: RequestType::BlobsByRoot(request.clone().into_request(&self.fork_context)), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -733,7 +733,7 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, request: RequestType::DataColumnsByRoot(request.clone().into_request(&self.chain.spec)), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(id)), + 
app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(id)), })?; debug!( @@ -839,7 +839,7 @@ impl SyncNetworkContext { .send(NetworkMessage::SendRequest { peer_id, request: RequestType::BlocksByRange(request.clone().into()), - request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -880,7 +880,7 @@ impl SyncNetworkContext { .send(NetworkMessage::SendRequest { peer_id, request: RequestType::BlobsByRange(request.clone()), - request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -919,7 +919,7 @@ impl SyncNetworkContext { self.send_network_msg(NetworkMessage::SendRequest { peer_id, request: RequestType::DataColumnsByRange(request.clone()), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), }) .map_err(|_| RpcRequestSendError::NetworkSendError)?; @@ -1308,7 +1308,7 @@ impl SyncNetworkContext { &self, id: Id, block_root: Hash256, - block: RpcBlock, + block: Arc>, seen_timestamp: Duration, ) -> Result<(), SendErrorProcessor> { let span = span!( @@ -1322,6 +1322,12 @@ impl SyncNetworkContext { .beacon_processor_if_enabled() .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; + let block = RpcBlock::new_without_blobs( + Some(block_root), + block, + self.network_globals().custody_columns_count() as usize, + ); + debug!(block = ?block_root, id, "Sending block for processing"); // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync // must receive a single `SyncMessage::BlockComponentProcessed` with this process type diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index 
018381a850..e7e6e62349 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -102,7 +102,6 @@ impl ActiveCustodyRequest { ) -> CustodyRequestResult { let Some(batch_request) = self.active_batch_columns_requests.get_mut(&req_id) else { warn!( - id = ?self.custody_id, block_root = ?self.block_root, %req_id, "Received custody column response for unrequested index" @@ -113,7 +112,6 @@ impl ActiveCustodyRequest { match resp { Ok((data_columns, seen_timestamp)) => { debug!( - id = ?self.custody_id, block_root = ?self.block_root, %req_id, %peer_id, @@ -161,7 +159,6 @@ impl ActiveCustodyRequest { if !missing_column_indexes.is_empty() { // Note: Batch logging that columns are missing to not spam logger debug!( - id = ?self.custody_id, block_root = ?self.block_root, %req_id, %peer_id, @@ -175,7 +172,6 @@ impl ActiveCustodyRequest { } Err(err) => { debug!( - id = ?self.custody_id, block_root = ?self.block_root, %req_id, %peer_id, diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index fe72979930..84c95b2a4c 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -460,7 +460,7 @@ impl TestRig { ) { self.log("parent_lookup_block_response"); self.send_sync_message(SyncMessage::RpcBlock { - request_id: SyncRequestId::SingleBlock { id }, + sync_request_id: SyncRequestId::SingleBlock { id }, peer_id, beacon_block, seen_timestamp: D, @@ -475,7 +475,7 @@ impl TestRig { ) { self.log("single_lookup_block_response"); self.send_sync_message(SyncMessage::RpcBlock { - request_id: SyncRequestId::SingleBlock { id }, + sync_request_id: SyncRequestId::SingleBlock { id }, peer_id, beacon_block, seen_timestamp: D, @@ -493,7 +493,7 @@ impl TestRig { blob_sidecar.as_ref().map(|b| b.index) )); self.send_sync_message(SyncMessage::RpcBlob { - request_id: SyncRequestId::SingleBlob { id }, + sync_request_id: 
SyncRequestId::SingleBlob { id }, peer_id, blob_sidecar, seen_timestamp: D, @@ -507,7 +507,7 @@ impl TestRig { blob_sidecar: Option>>, ) { self.send_sync_message(SyncMessage::RpcBlob { - request_id: SyncRequestId::SingleBlob { id }, + sync_request_id: SyncRequestId::SingleBlob { id }, peer_id, blob_sidecar, seen_timestamp: D, @@ -583,7 +583,7 @@ impl TestRig { fn parent_lookup_failed(&mut self, id: SingleLookupReqId, peer_id: PeerId, error: RPCError) { self.send_sync_message(SyncMessage::RpcError { peer_id, - request_id: SyncRequestId::SingleBlock { id }, + sync_request_id: SyncRequestId::SingleBlock { id }, error, }) } @@ -602,7 +602,7 @@ impl TestRig { fn single_lookup_failed(&mut self, id: SingleLookupReqId, peer_id: PeerId, error: RPCError) { self.send_sync_message(SyncMessage::RpcError { peer_id, - request_id: SyncRequestId::SingleBlock { id }, + sync_request_id: SyncRequestId::SingleBlock { id }, error, }) } @@ -614,11 +614,11 @@ impl TestRig { } } - fn return_empty_sampling_request(&mut self, (request_id, _): DCByRootId) { + fn return_empty_sampling_request(&mut self, (sync_request_id, _): DCByRootId) { let peer_id = PeerId::random(); // Send stream termination self.send_sync_message(SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column: None, seen_timestamp: timestamp_now(), @@ -631,10 +631,10 @@ impl TestRig { peer_id: PeerId, error: RPCError, ) { - for (request_id, _) in sampling_ids { + for (sync_request_id, _) in sampling_ids { self.send_sync_message(SyncMessage::RpcError { peer_id, - request_id, + sync_request_id, error: error.clone(), }) } @@ -760,14 +760,14 @@ impl TestRig { fn complete_data_columns_by_root_request( &mut self, - (request_id, _): DCByRootId, + (sync_request_id, _): DCByRootId, data_columns: &[Arc>], ) { let peer_id = PeerId::random(); for data_column in data_columns { // Send chunks self.send_sync_message(SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column: 
Some(data_column.clone()), seen_timestamp: timestamp_now(), @@ -775,7 +775,7 @@ impl TestRig { } // Send stream termination self.send_sync_message(SyncMessage::RpcDataColumn { - request_id, + sync_request_id, peer_id, data_column: None, seen_timestamp: timestamp_now(), @@ -785,17 +785,17 @@ impl TestRig { /// Return RPCErrors for all active requests of peer fn rpc_error_all_active_requests(&mut self, disconnected_peer_id: PeerId) { self.drain_network_rx(); - while let Ok(request_id) = self.pop_received_network_event(|ev| match ev { + while let Ok(sync_request_id) = self.pop_received_network_event(|ev| match ev { NetworkMessage::SendRequest { peer_id, - request_id: AppRequestId::Sync(id), + app_request_id: AppRequestId::Sync(id), .. } if *peer_id == disconnected_peer_id => Some(*id), _ => None, }) { self.send_sync_message(SyncMessage::RpcError { peer_id: disconnected_peer_id, - request_id, + sync_request_id, error: RPCError::Disconnected, }); } @@ -879,7 +879,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: RequestType::BlocksByRoot(request), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, }) @@ -899,7 +899,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: RequestType::BlobsByRoot(request), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids .to_vec() @@ -924,7 +924,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: RequestType::BlocksByRoot(request), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlock { id }), } if request.block_roots().to_vec().contains(&for_block) => Some(*id), _ => None, }) @@ -946,7 +946,7 @@ impl TestRig { 
NetworkMessage::SendRequest { peer_id: _, request: RequestType::BlobsByRoot(request), - request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), + app_request_id: AppRequestId::Sync(SyncRequestId::SingleBlob { id }), } if request .blob_ids .to_vec() @@ -974,7 +974,8 @@ impl TestRig { NetworkMessage::SendRequest { peer_id: _, request: RequestType::DataColumnsByRoot(request), - request_id: AppRequestId::Sync(id @ SyncRequestId::DataColumnsByRoot { .. }), + app_request_id: + AppRequestId::Sync(id @ SyncRequestId::DataColumnsByRoot { .. }), } if request .data_column_ids .to_vec() @@ -1296,7 +1297,7 @@ impl TestRig { .sync_manager .get_sampling_request_status(block_root, index) .unwrap_or_else(|| panic!("No request state for {index}")); - if !matches!(status, crate::sync::peer_sampling::Status::NoPeers { .. }) { + if !matches!(status, crate::sync::peer_sampling::Status::NoPeers) { panic!("expected {block_root} {index} request to be no peers: {status:?}"); } } diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index ca4344c0b2..932f485dd0 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -223,7 +223,7 @@ impl TestRig { RequestType::BlocksByRange(OldBlocksByRangeRequest::V2( OldBlocksByRangeRequestV2 { start_slot, .. }, )), - request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::BlocksByRange(id)), } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) @@ -240,7 +240,7 @@ impl TestRig { RequestType::DataColumnsByRange(DataColumnsByRangeRequest { start_slot, .. 
}), - request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRange(id)), } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) { @@ -256,7 +256,7 @@ impl TestRig { NetworkMessage::SendRequest { peer_id, request: RequestType::BlobsByRange(BlobsByRangeRequest { start_slot, .. }), - request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), + app_request_id: AppRequestId::Sync(SyncRequestId::BlobsByRange(id)), } if filter_f(*peer_id, *start_slot) => Some((*id, *peer_id)), _ => None, }) @@ -283,7 +283,7 @@ impl TestRig { "Completing BlocksByRange request {blocks_req_id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcBlock { - request_id: SyncRequestId::BlocksByRange(blocks_req_id), + sync_request_id: SyncRequestId::BlocksByRange(blocks_req_id), peer_id: block_peer, beacon_block: None, seen_timestamp: D, @@ -297,7 +297,7 @@ impl TestRig { "Completing BlobsByRange request {id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcBlob { - request_id: SyncRequestId::BlobsByRange(id), + sync_request_id: SyncRequestId::BlobsByRange(id), peer_id, blob_sidecar: None, seen_timestamp: D, @@ -310,7 +310,7 @@ impl TestRig { "Completing DataColumnsByRange request {id:?} with empty stream" )); self.send_sync_message(SyncMessage::RpcDataColumn { - request_id: SyncRequestId::DataColumnsByRange(id), + sync_request_id: SyncRequestId::DataColumnsByRange(id), peer_id, data_column: None, seen_timestamp: D, @@ -459,7 +459,8 @@ fn build_rpc_block( ) .unwrap() } - None => RpcBlock::new_without_blobs(None, block), + // Block has no data, expects zero columns + None => RpcBlock::new_without_blobs(None, block, 0), } } diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 49ef5c279c..67c24b9c7a 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ 
b/beacon_node/operation_pool/src/attestation_storage.rs @@ -1,6 +1,6 @@ use crate::AttestationStats; use itertools::Itertools; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use types::{ attestation::{AttestationBase, AttestationElectra}, superstruct, AggregateSignature, Attestation, AttestationData, BeaconState, BitList, BitVector, @@ -119,6 +119,18 @@ impl CompactAttestationRef<'_, E> { } } + pub fn get_committee_indices_map(&self) -> HashSet { + match self.indexed { + CompactIndexedAttestation::Base(_) => HashSet::from([self.data.index]), + CompactIndexedAttestation::Electra(indexed_att) => indexed_att + .committee_bits + .iter() + .enumerate() + .filter_map(|(index, bit)| if bit { Some(index as u64) } else { None }) + .collect(), + } + } + pub fn clone_as_attestation(&self) -> Attestation { match self.indexed { CompactIndexedAttestation::Base(indexed_att) => Attestation::Base(AttestationBase { @@ -268,7 +280,11 @@ impl CompactIndexedAttestationElectra { } pub fn committee_index(&self) -> Option { - self.get_committee_indices().first().copied() + self.committee_bits + .iter() + .enumerate() + .find(|&(_, bit)| bit) + .map(|(index, _)| index as u64) } pub fn get_committee_indices(&self) -> Vec { diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 584a5f9f32..ec8c6640b1 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1,5 +1,5 @@ mod attestation; -mod attestation_storage; +pub mod attestation_storage; mod attester_slashing; mod bls_to_execution_changes; mod max_cover; @@ -47,7 +47,7 @@ type SyncContributions = RwLock { /// Map from attestation ID (see below) to vectors of attestations. - attestations: RwLock>, + pub attestations: RwLock>, /// Map from sync aggregate ID to the best `SyncCommitteeContribution`s seen for that ID. 
sync_contributions: SyncContributions, /// Set of attester slashings, and the fork version they were verified against. @@ -673,12 +673,12 @@ impl OperationPool { /// This method may return objects that are invalid for block inclusion. pub fn get_filtered_attestations(&self, filter: F) -> Vec> where - F: Fn(&AttestationData) -> bool, + F: Fn(&AttestationData, HashSet) -> bool, { self.attestations .read() .iter() - .filter(|att| filter(&att.attestation_data())) + .filter(|att| filter(&att.attestation_data(), att.get_committee_indices_map())) .map(|att| att.clone_as_attestation()) .collect() } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 8723c2d708..e887aa9abc 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -661,10 +661,7 @@ pub fn get_config( }; } - client_config.chain.max_network_size = lighthouse_network::gossip_max_size( - spec.bellatrix_fork_epoch.is_some(), - spec.gossip_max_size as usize, - ); + client_config.chain.max_network_size = spec.max_payload_size as usize; if cli_args.get_flag("slasher") { let slasher_dir = if let Some(slasher_dir) = cli_args.get_one::("slasher-dir") { diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index d17a8f04d6..908f0759a9 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -22,7 +22,7 @@ directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } itertools = { workspace = true } -leveldb = { version = "0.8.6", optional = true } +leveldb = { version = "0.8.6", optional = true, default-features = false } logging = { workspace = true } lru = { workspace = true } metrics = { workspace = true } diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 3d8bbe1473..81d6d1d4bd 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -195,7 +195,6 @@ impl LevelDB { }; 
for (start_key, end_key) in [ - endpoints(DBColumn::BeaconStateTemporary), endpoints(DBColumn::BeaconState), endpoints(DBColumn::BeaconStateSummary), ] { diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 41fd17ef43..ed6154da80 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -25,7 +25,7 @@ pub enum Error { NoContinuationData, SplitPointModified(Slot, Slot), ConfigError(StoreConfigError), - SchemaMigrationError(String), + MigrationError(String), /// The store's `anchor_info` was mutated concurrently, the latest modification wasn't applied. AnchorInfoConcurrentMutation, /// The store's `blob_info` was mutated concurrently, the latest modification wasn't applied. diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs deleted file mode 100644 index 586db44c89..0000000000 --- a/beacon_node/store/src/garbage_collection.rs +++ /dev/null @@ -1,36 +0,0 @@ -//! Garbage collection process that runs at start-up to clean up the database. -use crate::database::interface::BeaconNodeBackend; -use crate::hot_cold_store::HotColdDB; -use crate::{DBColumn, Error}; -use tracing::debug; -use types::EthSpec; - -impl HotColdDB, BeaconNodeBackend> -where - E: EthSpec, -{ - /// Clean up the database by performing one-off maintenance at start-up. - pub fn remove_garbage(&self) -> Result<(), Error> { - self.delete_temp_states()?; - Ok(()) - } - - /// Delete the temporary states that were leftover by failed block imports. 
- pub fn delete_temp_states(&self) -> Result<(), Error> { - let mut ops = vec![]; - self.iter_temporary_state_roots().for_each(|state_root| { - if let Ok(state_root) = state_root { - ops.push(state_root); - } - }); - if !ops.is_empty() { - debug!("Garbage collecting {} temporary states", ops.len()); - - self.delete_batch(DBColumn::BeaconState, ops.clone())?; - self.delete_batch(DBColumn::BeaconStateSummary, ops.clone())?; - self.delete_batch(DBColumn::BeaconStateTemporary, ops)?; - } - - Ok(()) - } -} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6a30d8a428..362c5d8014 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -14,8 +14,8 @@ use crate::metadata::{ }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, metrics, parse_data_column_key, BlobSidecarListFromRoot, ColumnKeyIter, - DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, + get_data_column_key, metrics, parse_data_column_key, BlobSidecarListFromRoot, DBColumn, + DatabaseBlock, Error, ItemStore, KeyValueStoreOp, StoreItem, StoreOp, }; use itertools::{process_results, Itertools}; use lru::LruCache; @@ -36,7 +36,7 @@ use std::num::NonZeroUsize; use std::path::Path; use std::sync::Arc; use std::time::Duration; -use tracing::{debug, error, info, trace, warn}; +use tracing::{debug, error, info, warn}; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; use types::*; use zstd::{Decoder, Encoder}; @@ -80,7 +80,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// HTTP API. historic_state_cache: Mutex>, /// Chain spec. - pub(crate) spec: Arc, + pub spec: Arc, /// Mere vessel for E. 
_phantom: PhantomData, } @@ -161,7 +161,7 @@ pub enum HotColdDBError { MissingRestorePoint(Hash256), MissingColdStateSummary(Hash256), MissingHotStateSummary(Hash256), - MissingEpochBoundaryState(Hash256), + MissingEpochBoundaryState(Hash256, Hash256), MissingPrevState(Hash256), MissingSplitState(Hash256, Slot), MissingStateDiff(Hash256), @@ -390,8 +390,11 @@ impl HotColdDB, BeaconNodeBackend> { } db.store_config()?; - // Run a garbage collection pass. - db.remove_garbage()?; + // TODO(tree-states): Here we can choose to prune advanced states to reclaim disk space. As + // it's a foreground task there's no risk of race condition that can corrupt the DB. + // Advanced states for invalid blocks that were never written to the DB, or descendants of + // heads can be safely pruned at the expense of potentially having to recompute them in the + // future. However this would require a new dedicated pruning routine. // If configured, run a foreground compaction pass. if db.config.compact_on_init { @@ -402,12 +405,6 @@ impl HotColdDB, BeaconNodeBackend> { Ok(db) } - - /// Return an iterator over the state roots of all temporary states. - pub fn iter_temporary_state_roots(&self) -> ColumnKeyIter { - self.hot_db - .iter_column_keys::(DBColumn::BeaconStateTemporary) - } } impl, Cold: ItemStore> HotColdDB { @@ -903,26 +900,11 @@ impl, Cold: ItemStore> HotColdDB /// Store a state in the store. pub fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { - self.put_state_possibly_temporary(state_root, state, false) - } - - /// Store a state in the store. - /// - /// The `temporary` flag indicates whether this state should be considered canonical. 
- pub fn put_state_possibly_temporary( - &self, - state_root: &Hash256, - state: &BeaconState, - temporary: bool, - ) -> Result<(), Error> { let mut ops: Vec = Vec::new(); if state.slot() < self.get_split_slot() { self.store_cold_state(state_root, state, &mut ops)?; self.cold_db.do_atomically(ops) } else { - if temporary { - ops.push(TemporaryFlag.as_kv_store_op(*state_root)); - } self.store_hot_state(state_root, state, &mut ops)?; self.hot_db.do_atomically(ops) } @@ -1138,6 +1120,7 @@ impl, Cold: ItemStore> HotColdDB .load_hot_state(&epoch_boundary_state_root, true)? .ok_or(HotColdDBError::MissingEpochBoundaryState( epoch_boundary_state_root, + *state_root, ))?; Ok(Some(state)) } else { @@ -1201,17 +1184,6 @@ impl, Cold: ItemStore> HotColdDB key_value_batch.push(summary.as_kv_store_op(state_root)); } - StoreOp::PutStateTemporaryFlag(state_root) => { - key_value_batch.push(TemporaryFlag.as_kv_store_op(state_root)); - } - - StoreOp::DeleteStateTemporaryFlag(state_root) => { - key_value_batch.push(KeyValueStoreOp::DeleteKey( - TemporaryFlag::db_column(), - state_root.as_slice().to_vec(), - )); - } - StoreOp::DeleteBlock(block_root) => { key_value_batch.push(KeyValueStoreOp::DeleteKey( DBColumn::BeaconBlock, @@ -1241,13 +1213,6 @@ impl, Cold: ItemStore> HotColdDB state_root.as_slice().to_vec(), )); - // Delete the state temporary flag (if any). Temporary flags are commonly - // created by the state advance routine. 
- key_value_batch.push(KeyValueStoreOp::DeleteKey( - DBColumn::BeaconStateTemporary, - state_root.as_slice().to_vec(), - )); - if slot.is_none_or(|slot| slot % E::slots_per_epoch() == 0) { key_value_batch.push(KeyValueStoreOp::DeleteKey( DBColumn::BeaconState, @@ -1408,10 +1373,6 @@ impl, Cold: ItemStore> HotColdDB StoreOp::PutStateSummary(_, _) => (), - StoreOp::PutStateTemporaryFlag(_) => (), - - StoreOp::DeleteStateTemporaryFlag(_) => (), - StoreOp::DeleteBlock(block_root) => { guard.delete_block(&block_root); self.state_cache.lock().delete_block_states(&block_root); @@ -1492,8 +1453,8 @@ impl, Cold: ItemStore> HotColdDB // On the epoch boundary, store the full state. if state.slot() % E::slots_per_epoch() == 0 { - trace!( - slot = %state.slot().as_u64(), + debug!( + slot = %state.slot(), ?state_root, "Storing full state on epoch boundary" ); @@ -1571,12 +1532,6 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); - // If the state is marked as temporary, do not return it. It will become visible - // only once its transaction commits and deletes its temporary flag. - if self.load_state_temporary_flag(state_root)?.is_some() { - return Ok(None); - } - if let Some(HotStateSummary { slot, latest_block_root, @@ -1585,7 +1540,10 @@ impl, Cold: ItemStore> HotColdDB { let mut boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root, &self.spec)?.ok_or( - HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root), + HotColdDBError::MissingEpochBoundaryState( + epoch_boundary_state_root, + *state_root, + ), )?; // Immediately rebase the state from disk on the finalized state so that we can reuse @@ -2545,15 +2503,16 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.get(state_root) } - /// Load the temporary flag for a state root, if one exists. 
- /// - /// Returns `Some` if the state is temporary, or `None` if the state is permanent or does not - /// exist -- you should call `load_hot_state_summary` to find out which. - pub fn load_state_temporary_flag( - &self, - state_root: &Hash256, - ) -> Result, Error> { - self.hot_db.get(state_root) + /// Load all hot state summaries present in the hot DB + pub fn load_hot_state_summaries(&self) -> Result, Error> { + self.hot_db + .iter_column::(DBColumn::BeaconStateSummary) + .map(|res| { + let (state_root, value) = res?; + let summary = HotStateSummary::from_ssz_bytes(&value)?; + Ok((state_root, summary)) + }) + .collect() } /// Run a compaction pass to free up space used by deleted states. @@ -2985,54 +2944,13 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } - - /// Prune states from the hot database which are prior to the split. - /// - /// This routine is important for cleaning up advanced states which are stored in the database - /// with a temporary flag. - pub fn prune_old_hot_states(&self) -> Result<(), Error> { - let split = self.get_split_info(); - debug!( - %split.slot, - "Database state pruning started" - ); - let mut state_delete_batch = vec![]; - for res in self - .hot_db - .iter_column::(DBColumn::BeaconStateSummary) - { - let (state_root, summary_bytes) = res?; - let summary = HotStateSummary::from_ssz_bytes(&summary_bytes)?; - - if summary.slot <= split.slot { - let old = summary.slot < split.slot; - let non_canonical = summary.slot == split.slot - && state_root != split.state_root - && !split.state_root.is_zero(); - if old || non_canonical { - let reason = if old { - "old dangling state" - } else { - "non-canonical" - }; - debug!( - ?state_root, - slot = %summary.slot, - %reason, - "Deleting state" - ); - state_delete_batch.push(StoreOp::DeleteState(state_root, Some(summary.slot))); - } - } - } - let num_deleted_states = state_delete_batch.len(); - self.do_atomically_with_block_and_blobs_cache(state_delete_batch)?; - debug!(%num_deleted_states, 
"Database state pruning complete"); - Ok(()) - } } -/// Advance the split point of the store, moving new finalized states to the freezer. +/// Advance the split point of the store, copying new finalized states to the freezer. +/// +/// This function previously did a combination of freezer migration alongside pruning. Now it is +/// *just* responsible for copying relevant data to the freezer, while pruning is implemented +/// in `prune_hot_db`. pub fn migrate_database, Cold: ItemStore>( store: Arc>, finalized_state_root: Hash256, @@ -3064,29 +2982,17 @@ pub fn migrate_database, Cold: ItemStore>( return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into()); } - let mut hot_db_ops = vec![]; let mut cold_db_block_ops = vec![]; - let mut epoch_boundary_blocks = HashSet::new(); - let mut non_checkpoint_block_roots = HashSet::new(); // Iterate in descending order until the current split slot - let state_roots = RootsIterator::new(&store, finalized_state) - .take_while(|result| match result { - Ok((_, _, slot)) => *slot >= current_split_slot, - Err(_) => true, - }) - .collect::, _>>()?; + let state_roots: Vec<_> = + process_results(RootsIterator::new(&store, finalized_state), |iter| { + iter.take_while(|(_, _, slot)| *slot >= current_split_slot) + .collect() + })?; // Then, iterate states in slot ascending order, as they are stored wrt previous states. for (block_root, state_root, slot) in state_roots.into_iter().rev() { - // Delete the execution payload if payload pruning is enabled. At a skipped slot we may - // delete the payload for the finalized block itself, but that's OK as we only guarantee - // that payloads are present for slots >= the split slot. The payload fetching code is also - // forgiving of missing payloads. - if store.config.prune_payloads { - hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); - } - // Store the slot to block root mapping. 
cold_db_block_ops.push(KeyValueStoreOp::PutKeyValue( DBColumn::BeaconBlockRoots, @@ -3094,44 +3000,27 @@ pub fn migrate_database, Cold: ItemStore>( block_root.as_slice().to_vec(), )); - // At a missed slot, `state_root_iter` will return the block root - // from the previous non-missed slot. This ensures that the block root at an - // epoch boundary is always a checkpoint block root. We keep track of block roots - // at epoch boundaries by storing them in the `epoch_boundary_blocks` hash set. - // We then ensure that block roots at the epoch boundary aren't included in the - // `non_checkpoint_block_roots` hash set. - if slot % E::slots_per_epoch() == 0 { - epoch_boundary_blocks.insert(block_root); - } else { - non_checkpoint_block_roots.insert(block_root); - } - - if epoch_boundary_blocks.contains(&block_root) { - non_checkpoint_block_roots.remove(&block_root); - } - - // Delete the old summary, and the full state if we lie on an epoch boundary. - hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); - // Do not try to store states if a restore point is yet to be stored, or will never be // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state // which always needs to be copied from the hot DB to the freezer and should not be deleted. if slot != 0 && slot < anchor_info.state_upper_limit { - debug!(%slot, "Pruning finalized state"); continue; } - let mut cold_db_ops = vec![]; + let mut cold_db_state_ops = vec![]; // Only store the cold state if it's on a diff boundary. // Calling `store_cold_state_summary` instead of `store_cold_state` for those allows us // to skip loading many hot states. - if matches!( - store.hierarchy.storage_strategy(slot)?, - StorageStrategy::ReplayFrom(..) - ) { + if let StorageStrategy::ReplayFrom(from) = store.hierarchy.storage_strategy(slot)? { // Store slot -> state_root and state_root -> slot mappings. 
- store.store_cold_state_summary(&state_root, slot, &mut cold_db_ops)?; + debug!( + strategy = "replay", + from_slot = %from, + %slot, + "Storing cold state" + ); + store.store_cold_state_summary(&state_root, slot, &mut cold_db_state_ops)?; } else { // This is some state that we want to migrate to the freezer db. // There is no reason to cache this state. @@ -3139,36 +3028,22 @@ pub fn migrate_database, Cold: ItemStore>( .get_hot_state(&state_root, false)? .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; - store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; + store.store_cold_state(&state_root, &state, &mut cold_db_state_ops)?; } // Cold states are diffed with respect to each other, so we need to finish writing previous // states before storing new ones. - store.cold_db.do_atomically(cold_db_ops)?; + store.cold_db.do_atomically(cold_db_state_ops)?; } - // Prune sync committee branch data for all non checkpoint block roots. - // Note that `non_checkpoint_block_roots` should only contain non checkpoint block roots - // as long as `finalized_state.slot()` is at an epoch boundary. If this were not the case - // we risk the chance of pruning a `sync_committee_branch` for a checkpoint block root. - // E.g. if `current_split_slot` = (Epoch A slot 0) and `finalized_state.slot()` = (Epoch C slot 31) - // and (Epoch D slot 0) is a skipped slot, we will have pruned a `sync_committee_branch` - // for a checkpoint block root. - non_checkpoint_block_roots - .into_iter() - .for_each(|block_root| { - hot_db_ops.push(StoreOp::DeleteSyncCommitteeBranch(block_root)); - }); - - // Warning: Critical section. We have to take care not to put any of the two databases in an + // Warning: Critical section. We have to take care not to put any of the two databases in an // inconsistent state if the OS process dies at any point during the freezing // procedure. 
// // Since it is pretty much impossible to be atomic across more than one database, we trade - // losing track of states to delete, for consistency. In other words: We should be safe to die - // at any point below but it may happen that some states won't be deleted from the hot database - // and will remain there forever. Since dying in these particular few lines should be an - // exceedingly rare event, this should be an acceptable tradeoff. + // potentially re-doing the migration to copy data to the freezer, for consistency. If we crash + // after writing all new block & state data to the freezer but before updating the split, then + // in the worst case we will restart with the old split and re-run the migration. store.cold_db.do_atomically(cold_db_block_ops)?; store.cold_db.sync()?; { @@ -3181,7 +3056,7 @@ pub fn migrate_database, Cold: ItemStore>( error!( previous_split_slot = %current_split_slot, current_split_slot = %latest_split_slot, - "Race condition detected: Split point changed while moving states to the freezer" + "Race condition detected: Split point changed while copying states to the freezer" ); // Assume the freezing procedure will be retried in case this happens. @@ -3206,9 +3081,6 @@ pub fn migrate_database, Cold: ItemStore>( *split_guard = split; } - // Delete the blocks and states from the hot database if we got this far. - store.do_atomically_with_block_and_blobs_cache(hot_db_ops)?; - // Update the cache's view of the finalized state. 
store.update_finalized_state( finalized_state_root, @@ -3325,23 +3197,6 @@ impl StoreItem for ColdStateSummary { } } -#[derive(Debug, Clone, Copy, Default)] -pub struct TemporaryFlag; - -impl StoreItem for TemporaryFlag { - fn db_column() -> DBColumn { - DBColumn::BeaconStateTemporary - } - - fn as_store_bytes(&self) -> Vec { - vec![] - } - - fn from_store_bytes(_: &[u8]) -> Result { - Ok(TemporaryFlag) - } -} - #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct BytesKey { pub key: Vec, diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 2b5be03489..5b30971fd8 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -14,7 +14,6 @@ pub mod config; pub mod consensus_context; pub mod errors; mod forwards_iter; -mod garbage_collection; pub mod hdiff; pub mod historic_state_cache; pub mod hot_cold_store; @@ -241,8 +240,6 @@ pub enum StoreOp<'a, E: EthSpec> { PutBlobs(Hash256, BlobSidecarList), PutDataColumns(Hash256, DataColumnSidecarList), PutStateSummary(Hash256, HotStateSummary), - PutStateTemporaryFlag(Hash256), - DeleteStateTemporaryFlag(Hash256), DeleteBlock(Hash256), DeleteBlobs(Hash256), DeleteDataColumns(Hash256, Vec), @@ -287,8 +284,10 @@ pub enum DBColumn { /// Mapping from state root to `ColdStateSummary` in the cold DB. #[strum(serialize = "bcs")] BeaconColdStateSummary, - /// For the list of temporary states stored during block import, - /// and then made non-temporary by the deletion of their state root from this column. + /// DEPRECATED. + /// + /// Previously used for the list of temporary states stored during block import, and then made + /// non-temporary by the deletion of their state root from this column. #[strum(serialize = "bst")] BeaconStateTemporary, /// Execution payloads for blocks more recent than the finalized checkpoint. 
diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 1d70e105b9..55c64bf850 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(22); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(23); // All the keys that get stored under the `BeaconMeta` column. // diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 5638be0564..d1b059f3b2 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" authors = ["Sigma Prime "] edition = { workspace = true } diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 35dd806fb3..5d0ad1f45e 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -19,6 +19,7 @@ mediatype = "0.19.13" multiaddr = "0.18.2" pretty_reqwest_error = { workspace = true } proto_array = { workspace = true } +rand = { workspace = true } reqwest = { workspace = true } reqwest-eventsource = "0.5.0" sensitive_url = { workspace = true } @@ -26,6 +27,7 @@ serde = { workspace = true } serde_json = { workspace = true } slashing_protection = { workspace = true } ssz_types = { workspace = true } +test_random_derive = { path = "../../common/test_random_derive" } types = { workspace = true } zeroize = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 57fbb1a4c3..12c9da52bf 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -824,6 +824,26 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `GET beacon/states/{state_id}/pending_consolidations` + /// + /// Returns `Ok(None)` on a 404 error. 
+ pub async fn get_beacon_states_pending_consolidations( + &self, + state_id: StateId, + ) -> Result>>, Error> + { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("pending_consolidations"); + + self.get_opt(path).await + } + /// `GET beacon/light_client/updates` /// /// Returns `Ok(None)` on a 404 error. diff --git a/common/eth2/src/lighthouse/sync_state.rs b/common/eth2/src/lighthouse/sync_state.rs index 0519d6f4b0..0327f7073f 100644 --- a/common/eth2/src/lighthouse/sync_state.rs +++ b/common/eth2/src/lighthouse/sync_state.rs @@ -104,8 +104,8 @@ impl std::fmt::Display for SyncState { match self { SyncState::SyncingFinalized { .. } => write!(f, "Syncing Finalized Chain"), SyncState::SyncingHead { .. } => write!(f, "Syncing Head Chain"), - SyncState::Synced { .. } => write!(f, "Synced"), - SyncState::Stalled { .. } => write!(f, "Stalled"), + SyncState::Synced => write!(f, "Synced"), + SyncState::Stalled => write!(f, "Stalled"), SyncState::SyncTransition => write!(f, "Evaluating known peers"), SyncState::BackFillSyncing { .. 
} => write!(f, "Syncing Historical Blocks"), } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 87ee87f183..428cc17415 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -18,7 +18,9 @@ use std::fmt::{self, Display}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use test_random_derive::TestRandom; use types::beacon_block_body::KzgCommitments; +use types::test_utils::TestRandom; pub use types::*; #[cfg(feature = "lighthouse")] @@ -802,13 +804,13 @@ pub struct LightClientUpdatesQuery { } #[derive(Encode, Decode)] -pub struct LightClientUpdateSszResponse { - pub response_chunk_len: Vec, - pub response_chunk: Vec, +pub struct LightClientUpdateResponseChunk { + pub response_chunk_len: u64, + pub response_chunk: LightClientUpdateResponseChunkInner, } #[derive(Encode, Decode)] -pub struct LightClientUpdateResponseChunk { +pub struct LightClientUpdateResponseChunkInner { pub context: [u8; 4], pub payload: Vec, } @@ -2015,11 +2017,11 @@ impl ForkVersionDeserialize for FullPayloadContents { fork_name: ForkName, ) -> Result { if fork_name.deneb_enabled() { - serde_json::from_value(value) + ExecutionPayloadAndBlobs::deserialize_by_fork::<'de, D>(value, fork_name) .map(Self::PayloadAndBlobs) .map_err(serde::de::Error::custom) } else if fork_name.bellatrix_enabled() { - serde_json::from_value(value) + ExecutionPayload::deserialize_by_fork::<'de, D>(value, fork_name) .map(Self::Payload) .map_err(serde::de::Error::custom) } else { @@ -2037,6 +2039,28 @@ pub struct ExecutionPayloadAndBlobs { pub blobs_bundle: BlobsBundle, } +impl ForkVersionDeserialize for ExecutionPayloadAndBlobs { + fn deserialize_by_fork<'de, D: Deserializer<'de>>( + value: Value, + fork_name: ForkName, + ) -> Result { + #[derive(Deserialize)] + #[serde(bound = "E: EthSpec")] + struct Helper { + execution_payload: serde_json::Value, + blobs_bundle: BlobsBundle, + } + let helper: Helper = 
serde_json::from_value(value).map_err(serde::de::Error::custom)?; + Ok(Self { + execution_payload: ExecutionPayload::deserialize_by_fork::<'de, D>( + helper.execution_payload, + fork_name, + )?, + blobs_bundle: helper.blobs_bundle, + }) + } +} + impl ForkVersionDecode for ExecutionPayloadAndBlobs { fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { let mut builder = ssz::SszDecoderBuilder::new(bytes); @@ -2067,7 +2091,7 @@ pub enum ContentType { Ssz, } -#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] #[serde(bound = "E: EthSpec")] pub struct BlobsBundle { pub commitments: KzgCommitments, @@ -2162,6 +2186,10 @@ pub struct StandardAttestationRewards { #[cfg(test)] mod test { + use std::fmt::Debug; + + use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use super::*; #[test] @@ -2175,4 +2203,107 @@ mod test { let y: ValidatorId = serde_json::from_str(pubkey_str).unwrap(); assert_eq!(serde_json::to_string(&y).unwrap(), pubkey_str); } + + #[test] + fn test_execution_payload_execution_payload_deserialize_by_fork() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + + let payloads = [ + ExecutionPayload::Bellatrix( + ExecutionPayloadBellatrix::::random_for_test(rng), + ), + ExecutionPayload::Capella(ExecutionPayloadCapella::::random_for_test( + rng, + )), + ExecutionPayload::Deneb(ExecutionPayloadDeneb::::random_for_test( + rng, + )), + ExecutionPayload::Electra(ExecutionPayloadElectra::::random_for_test( + rng, + )), + ExecutionPayload::Fulu(ExecutionPayloadFulu::::random_for_test(rng)), + ]; + let merged_forks = &ForkName::list_all()[2..]; + assert_eq!( + payloads.len(), + merged_forks.len(), + "we should test every known fork; add new fork variant to payloads above" + ); + + for (payload, &fork_name) in payloads.into_iter().zip(merged_forks) { + assert_eq!(payload.fork_name(), fork_name); + let 
payload_str = serde_json::to_string(&payload).unwrap(); + let mut de = serde_json::Deserializer::from_str(&payload_str); + generic_deserialize_by_fork(&mut de, payload, fork_name); + } + } + + #[test] + fn test_execution_payload_and_blobs_deserialize_by_fork() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + + let payloads = [ + { + let execution_payload = + ExecutionPayload::Deneb( + ExecutionPayloadDeneb::::random_for_test(rng), + ); + let blobs_bundle = BlobsBundle::random_for_test(rng); + ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + } + }, + { + let execution_payload = + ExecutionPayload::Electra( + ExecutionPayloadElectra::::random_for_test(rng), + ); + let blobs_bundle = BlobsBundle::random_for_test(rng); + ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + } + }, + { + let execution_payload = + ExecutionPayload::Fulu( + ExecutionPayloadFulu::::random_for_test(rng), + ); + let blobs_bundle = BlobsBundle::random_for_test(rng); + ExecutionPayloadAndBlobs { + execution_payload, + blobs_bundle, + } + }, + ]; + let blob_forks = &ForkName::list_all()[4..]; + + assert_eq!( + payloads.len(), + blob_forks.len(), + "we should test every known fork; add new fork variant to payloads above" + ); + + for (payload, &fork_name) in payloads.into_iter().zip(blob_forks) { + assert_eq!(payload.execution_payload.fork_name(), fork_name); + let payload_str = serde_json::to_string(&payload).unwrap(); + let mut de = serde_json::Deserializer::from_str(&payload_str); + generic_deserialize_by_fork(&mut de, payload, fork_name); + } + } + + fn generic_deserialize_by_fork< + 'de, + D: Deserializer<'de>, + O: ForkVersionDeserialize + PartialEq + Debug, + >( + deserializer: D, + original: O, + fork_name: ForkName, + ) { + let val = Value::deserialize(deserializer).unwrap(); + let roundtrip = O::deserialize_by_fork::<'de, D>(val, fork_name).unwrap(); + assert_eq!(original, roundtrip); + } } diff --git 
a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 1455ec5f63..4d4ccdf717 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -100,15 +100,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0xb97036A26259B7147018913bD58a774cf91acf25 # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # 33024, ~31 days MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s @@ -150,9 +148,10 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 2 # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 256 -# DAS +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 9ff5a16198..eece34b89c 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -43,7 +43,7 @@ DENEB_FORK_VERSION: 0x04000064 DENEB_FORK_EPOCH: 889856 # 2024-03-11T18:30:20.000Z # Electra ELECTRA_FORK_VERSION: 0x05000064 -ELECTRA_FORK_EPOCH: 18446744073709551615 +ELECTRA_FORK_EPOCH: 1337856 # 2025-04-30T14:03:40.000Z # Fulu FULU_FORK_VERSION: 0x06000064 FULU_FORK_EPOCH: 18446744073709551615 @@ -97,9 +97,8 @@ DEPOSIT_CONTRACT_ADDRESS: 0x0B98057eA310F4d31F2a452B414647007d1645d9 # Network # --------------------------------------------------------------- 
SUBNETS_PER_NODE: 4 -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -MAX_CHUNK_SIZE: 10485760 TTFB_TIMEOUT: 5 RESP_TIMEOUT: 10 MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 @@ -118,12 +117,25 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 -# `uint64(6)` -MAX_BLOBS_PER_BLOCK: 6 +# `uint64(2)` +MAX_BLOBS_PER_BLOCK: 2 -# DAS +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**6 * 10**9 (= 64,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 64000000000 +# `2` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 2 +# `uint64(2)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 2 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 256 + +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index e5f38b8c9b..19a3f79cc0 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -88,15 +88,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s @@ -139,9 +137,10 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 9 # MAX_REQUEST_BLOCKS_DENEB * 
MAX_BLOBS_PER_BLOCK_ELECTRA MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 -# DAS +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml index 33eaa7e8a9..5d8df4006c 100644 --- a/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/hoodi/boot_enr.yaml @@ -11,3 +11,6 @@ - enr:-Ku4QIC89sMC0o-irosD4_23lJJ4qCGOvdUz7SmoShWx0k6AaxCFTKviEHa-sa7-EzsiXpDp0qP0xzX6nKdXJX3X-IQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhIbRilSJc2VjcDI1NmsxoQK_m0f1DzDc9Cjrspm36zuRa7072HSiMGYWLsKiVSbP34N1ZHCCIyk - enr:-Ku4QNkWjw5tNzo8DtWqKm7CnDdIq_y7xppD6c1EZSwjB8rMOkSFA1wJPLoKrq5UvA7wcxIotH6Usx3PAugEN2JMncIBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpBd9cEGEAAJEP__________gmlkgnY0gmlwhIbHuBeJc2VjcDI1NmsxoQP3FwrhFYB60djwRjAoOjttq6du94DtkQuaN99wvgqaIYN1ZHCCIyk - enr:-OS4QMJGE13xEROqvKN1xnnt7U-noc51VXyM6wFMuL9LMhQDfo1p1dF_zFdS4OsnXz_vIYk-nQWnqJMWRDKvkSK6_CwDh2F0dG5ldHOIAAAAADAAAACGY2xpZW502IpMaWdodGhvdXNljDcuMC4wLWJldGEuM4RldGgykNLxmX9gAAkQAAgAAAAAAACCaWSCdjSCaXCEhse4F4RxdWljgiMqiXNlY3AyNTZrMaECef77P8k5l3PC_raLw42OAzdXfxeQ-58BJriNaqiRGJSIc3luY25ldHMAg3RjcIIjKIN1ZHCCIyg +# Teku +- enr:-LK4QDwhXMitMbC8xRiNL-XGMhRyMSOnxej-zGifjv9Nm5G8EF285phTU-CAsMHRRefZimNI7eNpAluijMQP7NDC8kEMh2F0dG5ldHOIAAAAAAAABgCEZXRoMpDS8Zl_YAAJEAAIAAAAAAAAgmlkgnY0gmlwhAOIT_SJc2VjcDI1NmsxoQMoHWNL4MAvh6YpQeM2SUjhUrLIPsAVPB8nyxbmckC6KIN0Y3CCIyiDdWRwgiMo +- enr:-LK4QPYl2HnMPQ7b1es6Nf_tFYkyya5bj9IqAKOEj2cmoqVkN8ANbJJJK40MX4kciL7pZszPHw6vLNyeC-O3HUrLQv8Mh2F0dG5ldHOIAAAAAAAAAMCEZXRoMpDS8Zl_YAAJEAAIAAAAAAAAgmlkgnY0gmlwhAMYRG-Jc2VjcDI1NmsxoQPQ35tjr6q1qUqwAnegQmYQyfqxC_6437CObkZneI9n34N0Y3CCIyiDdWRwgiMo diff --git a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml 
b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml index 19d7797424..5cca1cd037 100644 --- a/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/hoodi/config.yaml @@ -93,15 +93,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 74fe727867..886e5d12ed 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -49,7 +49,7 @@ DENEB_FORK_VERSION: 0x04000000 DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC # Electra ELECTRA_FORK_VERSION: 0x05000000 -ELECTRA_FORK_EPOCH: 18446744073709551615 +ELECTRA_FORK_EPOCH: 364032 # May 7, 2025, 10:05:11am UTC # Fulu FULU_FORK_VERSION: 0x06000000 FULU_FORK_EPOCH: 18446744073709551615 @@ -103,15 +103,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 
2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s @@ -142,9 +140,22 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 -# DAS +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml index 22b711861f..ba9a3e8354 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml @@ -1,6 +1,12 @@ -# EF bootnodes +# EF - enr:-Ku4QDZ_rCowZFsozeWr60WwLgOfHzv1Fz2cuMvJqN5iJzLxKtVjoIURY42X_YTokMi3IGstW5v32uSYZyGUXj9Q_IECh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhIpEe5iJc2VjcDI1NmsxoQNHTpFdaNSCEWiN_QqT396nb0PzcUpLe3OVtLph-AciBYN1ZHCCIy0 - enr:-Ku4QHRyRwEPT7s0XLYzJ_EeeWvZTXBQb4UCGy1F_3m-YtCNTtDlGsCMr4UTgo4uR89pv11uM-xq4w6GKfKhqU31hTgCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhIrFM7WJc2VjcDI1NmsxoQI4diTwChN3zAAkarf7smOHCdFb1q3DSwdiQ_Lc_FdzFIN1ZHCCIy0 - enr:-Ku4QOkvvf0u5Hg4-HhY-SJmEyft77G5h3rUM8VF_e-Hag5cAma3jtmFoX4WElLAqdILCA-UWFRN1ZCDJJVuEHrFeLkDh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhJK-AWeJc2VjcDI1NmsxoQLFcT5VE_NMiIC8Ll7GypWDnQ4UEmuzD7hF_Hf4veDJwIN1ZHCCIy0 - 
enr:-Ku4QH6tYsHKITYeHUu5kdfXgEZWI18EWk_2RtGOn1jBPlx2UlS_uF3Pm5Dx7tnjOvla_zs-wwlPgjnEOcQDWXey51QCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhIs7Mc6Jc2VjcDI1NmsxoQIET4Mlv9YzhrYhX_H9D7aWMemUrvki6W4J2Qo0YmFMp4N1ZHCCIy0 - enr:-Ku4QDmz-4c1InchGitsgNk4qzorWMiFUoaPJT4G0IiF8r2UaevrekND1o7fdoftNucirj7sFFTTn2-JdC2Ej0p1Mn8Ch2F0dG5ldHOIAAAAAAAAAACEZXRoMpCo_ujukAAAaf__________gmlkgnY0gmlwhKpA-liJc2VjcDI1NmsxoQMpHP5U1DK8O_JQU6FadmWbE42qEdcGlllR8HcSkkfWq4N1ZHCCIy0 + +# Teku bootnode +- enr:-KO4QP7MmB3juk8rUjJHcUoxZDU9Np4FlW0HyDEGIjSO7GD9PbSsabu7713cWSUWKDkxIypIXg1A-6lG7ySRGOMZHeGCAmuEZXRoMpDTH2GRkAAAc___________gmlkgnY0gmlwhBSoyGOJc2VjcDI1NmsxoQNta5b_bexSSwwrGW2Re24MjfMntzFd0f2SAxQtMj3ueYN0Y3CCIyiDdWRwgiMo + +# Lodestar +- enr:-KG4QJejf8KVtMeAPWFhN_P0c4efuwu1pZHELTveiXUeim6nKYcYcMIQpGxxdgT2Xp9h-M5pr9gn2NbbwEAtxzu50Y8BgmlkgnY0gmlwhEEVkQCDaXA2kCoBBPnAEJg4AAAAAAAAAAGJc2VjcDI1NmsxoQLEh_eVvk07AQABvLkTGBQTrrIOQkzouMgSBtNHIRUxOIN1ZHCCIyiEdWRwNoIjKA diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index af78332205..10be107263 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -89,15 +89,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s @@ -140,9 +138,10 @@ MAX_BLOBS_PER_BLOCK_ELECTRA: 9 # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA 
MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 -# DAS +# Fulu NUMBER_OF_COLUMNS: 128 NUMBER_OF_CUSTODY_GROUPS: 128 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 +MAX_BLOBS_PER_BLOCK_FULU: 12 diff --git a/common/eth2_wallet_manager/src/locked_wallet.rs b/common/eth2_wallet_manager/src/locked_wallet.rs index a77f9bd780..2af863a4bf 100644 --- a/common/eth2_wallet_manager/src/locked_wallet.rs +++ b/common/eth2_wallet_manager/src/locked_wallet.rs @@ -22,7 +22,7 @@ pub const LOCK_FILE: &str = ".lock"; /// /// - Control over the `.lock` file to prevent concurrent access. /// - A `next_validator` function which wraps `Wallet::next_validator`, ensuring that the wallet is -/// persisted to disk (as JSON) between each consecutive call. +/// persisted to disk (as JSON) between each consecutive call. pub struct LockedWallet { wallet_dir: PathBuf, wallet: Wallet, diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index bd5e31e3ab..b20708e7b0 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v7.0.0-beta.5-", - fallback = "Lighthouse/v7.0.0-beta.5" + prefix = "Lighthouse/v7.1.0-beta.0-", + fallback = "Lighthouse/v7.1.0-beta.0" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "7.0.0-beta.5" + "7.1.0-beta.0" } /// Returns the name of the current client running. 
diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index a69bc6ab23..6975e04505 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -16,8 +16,9 @@ parking_lot = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true, features = [ "time" ] } -tracing = "0.1" +tracing = { workspace = true } tracing-appender = { workspace = true } tracing-core = { workspace = true } tracing-log = { workspace = true } tracing-subscriber = { workspace = true } +workspace_members = { workspace = true } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 403f682a06..5c4de1fd61 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -1,24 +1,24 @@ -use chrono::Local; -use logroller::{Compression, LogRollerBuilder, Rotation, RotationSize}; use metrics::{try_create_int_counter, IntCounter, Result as MetricsResult}; -use std::io::Write; -use std::path::PathBuf; use std::sync::LazyLock; use std::time::{Duration, Instant}; -use tracing::Subscriber; -use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; -use tracing_subscriber::layer::Context; -use tracing_subscriber::{EnvFilter, Layer}; +use tracing_subscriber::EnvFilter; pub const MAX_MESSAGE_WIDTH: usize = 40; pub mod macros; mod sse_logging_components; +mod tracing_libp2p_discv5_logging_layer; pub mod tracing_logging_layer; mod tracing_metrics_layer; +mod utils; pub use sse_logging_components::SSELoggingComponents; +pub use tracing_libp2p_discv5_logging_layer::{ + create_libp2p_discv5_tracing_layer, Libp2pDiscv5TracingLayer, +}; +pub use tracing_logging_layer::LoggingLayer; pub use tracing_metrics_layer::MetricsLayer; +pub use utils::build_workspace_filter; /// The minimum interval between log messages indicating that a queue is full. 
const LOG_DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30); @@ -51,132 +51,6 @@ impl TimeLatch { } } -pub struct Libp2pDiscv5TracingLayer { - pub libp2p_non_blocking_writer: NonBlocking, - pub _libp2p_guard: WorkerGuard, - pub discv5_non_blocking_writer: NonBlocking, - pub _discv5_guard: WorkerGuard, -} - -impl Layer for Libp2pDiscv5TracingLayer -where - S: Subscriber, -{ - fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context) { - let meta = event.metadata(); - let log_level = meta.level(); - let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); - - let target = match meta.target().split_once("::") { - Some((crate_name, _)) => crate_name, - None => "unknown", - }; - - let mut writer = match target { - "gossipsub" => self.libp2p_non_blocking_writer.clone(), - "discv5" => self.discv5_non_blocking_writer.clone(), - _ => return, - }; - - let mut visitor = LogMessageExtractor { - message: String::default(), - }; - - event.record(&mut visitor); - let message = format!("{} {} {}\n", timestamp, log_level, visitor.message); - - if let Err(e) = writer.write_all(message.as_bytes()) { - eprintln!("Failed to write log: {}", e); - } - } -} - -struct LogMessageExtractor { - message: String, -} - -impl tracing_core::field::Visit for LogMessageExtractor { - fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) { - self.message = format!("{} {:?}", self.message, value); - } -} - -pub fn create_libp2p_discv5_tracing_layer( - base_tracing_log_path: Option, - max_log_size: u64, - compression: bool, - max_log_number: usize, -) -> Libp2pDiscv5TracingLayer { - if let Some(mut tracing_log_path) = base_tracing_log_path { - // Ensure that `tracing_log_path` only contains directories. 
- for p in tracing_log_path.clone().iter() { - tracing_log_path = tracing_log_path.join(p); - if let Ok(metadata) = tracing_log_path.metadata() { - if !metadata.is_dir() { - tracing_log_path.pop(); - break; - } - } - } - - let mut libp2p_writer = - LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log")) - .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) - .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| { - eprintln!("Failed to convert max_log_number to u64: {}", e); - 10 - })); - - let mut discv5_writer = - LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("discv5.log")) - .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) - .max_keep_files(max_log_number.try_into().unwrap_or_else(|e| { - eprintln!("Failed to convert max_log_number to u64: {}", e); - 10 - })); - - if compression { - libp2p_writer = libp2p_writer.compression(Compression::Gzip); - discv5_writer = discv5_writer.compression(Compression::Gzip); - } - - let libp2p_writer = match libp2p_writer.build() { - Ok(writer) => writer, - Err(e) => { - eprintln!("Failed to initialize libp2p rolling file appender: {e}"); - std::process::exit(1); - } - }; - - let discv5_writer = match discv5_writer.build() { - Ok(writer) => writer, - Err(e) => { - eprintln!("Failed to initialize discv5 rolling file appender: {e}"); - std::process::exit(1); - } - }; - - let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer); - let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer); - - Libp2pDiscv5TracingLayer { - libp2p_non_blocking_writer, - _libp2p_guard, - discv5_non_blocking_writer, - _discv5_guard, - } - } else { - let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(std::io::sink()); - let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(std::io::sink()); - Libp2pDiscv5TracingLayer { - libp2p_non_blocking_writer, - _libp2p_guard, - discv5_non_blocking_writer, - _discv5_guard, 
- } - } -} - /// Return a tracing subscriber suitable for test usage. /// /// By default no logs will be printed, but they can be enabled via diff --git a/common/logging/src/tracing_libp2p_discv5_logging_layer.rs b/common/logging/src/tracing_libp2p_discv5_logging_layer.rs new file mode 100644 index 0000000000..90033d11ad --- /dev/null +++ b/common/logging/src/tracing_libp2p_discv5_logging_layer.rs @@ -0,0 +1,113 @@ +use chrono::Local; +use logroller::{LogRollerBuilder, Rotation, RotationSize}; +use std::io::Write; +use std::path::PathBuf; +use tracing::Subscriber; +use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; +use tracing_subscriber::{layer::Context, Layer}; + +pub struct Libp2pDiscv5TracingLayer { + pub libp2p_non_blocking_writer: NonBlocking, + _libp2p_guard: WorkerGuard, + pub discv5_non_blocking_writer: NonBlocking, + _discv5_guard: WorkerGuard, +} + +impl Layer for Libp2pDiscv5TracingLayer +where + S: Subscriber, +{ + fn on_event(&self, event: &tracing::Event<'_>, _ctx: Context) { + let meta = event.metadata(); + let log_level = meta.level(); + let timestamp = Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); + + let target = match meta.target().split_once("::") { + Some((crate_name, _)) => crate_name, + None => "unknown", + }; + + let mut writer = match target { + "libp2p_gossipsub" => self.libp2p_non_blocking_writer.clone(), + "discv5" => self.discv5_non_blocking_writer.clone(), + _ => return, + }; + + let mut visitor = LogMessageExtractor { + message: String::default(), + }; + + event.record(&mut visitor); + let message = format!("{} {} {}\n", timestamp, log_level, visitor.message); + + if let Err(e) = writer.write_all(message.as_bytes()) { + eprintln!("Failed to write log: {}", e); + } + } +} + +struct LogMessageExtractor { + message: String, +} + +impl tracing_core::field::Visit for LogMessageExtractor { + fn record_debug(&mut self, _: &tracing_core::Field, value: &dyn std::fmt::Debug) { + self.message = format!("{} {:?}", 
self.message, value); + } +} + +pub fn create_libp2p_discv5_tracing_layer( + base_tracing_log_path: Option, + max_log_size: u64, +) -> Option { + if let Some(mut tracing_log_path) = base_tracing_log_path { + // Ensure that `tracing_log_path` only contains directories. + for p in tracing_log_path.clone().iter() { + tracing_log_path = tracing_log_path.join(p); + if let Ok(metadata) = tracing_log_path.metadata() { + if !metadata.is_dir() { + tracing_log_path.pop(); + break; + } + } + } + + let libp2p_writer = + LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("libp2p.log")) + .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) + .max_keep_files(1); + + let discv5_writer = + LogRollerBuilder::new(tracing_log_path.clone(), PathBuf::from("discv5.log")) + .rotation(Rotation::SizeBased(RotationSize::MB(max_log_size))) + .max_keep_files(1); + + let libp2p_writer = match libp2p_writer.build() { + Ok(writer) => writer, + Err(e) => { + eprintln!("Failed to initialize libp2p rolling file appender: {e}"); + std::process::exit(1); + } + }; + + let discv5_writer = match discv5_writer.build() { + Ok(writer) => writer, + Err(e) => { + eprintln!("Failed to initialize discv5 rolling file appender: {e}"); + std::process::exit(1); + } + }; + + let (libp2p_non_blocking_writer, _libp2p_guard) = NonBlocking::new(libp2p_writer); + let (discv5_non_blocking_writer, _discv5_guard) = NonBlocking::new(discv5_writer); + + Some(Libp2pDiscv5TracingLayer { + libp2p_non_blocking_writer, + _libp2p_guard, + discv5_non_blocking_writer, + _discv5_guard, + }) + } else { + None + } +} diff --git a/common/logging/src/tracing_logging_layer.rs b/common/logging/src/tracing_logging_layer.rs index 4478e1facb..c3784a8f62 100644 --- a/common/logging/src/tracing_logging_layer.rs +++ b/common/logging/src/tracing_logging_layer.rs @@ -1,3 +1,5 @@ +use crate::utils::is_ascii_control; + use chrono::prelude::*; use serde_json::{Map, Value}; use std::collections::HashMap; @@ -11,16 +13,16 @@ use 
tracing_subscriber::layer::Context; use tracing_subscriber::registry::LookupSpan; use tracing_subscriber::Layer; +const FIXED_MESSAGE_WIDTH: usize = 44; +const ALIGNED_LEVEL_WIDTH: usize = 5; + pub struct LoggingLayer { pub non_blocking_writer: NonBlocking, - pub guard: WorkerGuard, + _guard: WorkerGuard, pub disable_log_timestamp: bool, pub log_color: bool, - pub logfile_color: bool, pub log_format: Option, - pub logfile_format: Option, pub extra_info: bool, - pub dep_logs: bool, span_fields: Arc>>, } @@ -28,25 +30,19 @@ impl LoggingLayer { #[allow(clippy::too_many_arguments)] pub fn new( non_blocking_writer: NonBlocking, - guard: WorkerGuard, + _guard: WorkerGuard, disable_log_timestamp: bool, log_color: bool, - logfile_color: bool, log_format: Option, - logfile_format: Option, extra_info: bool, - dep_logs: bool, ) -> Self { Self { non_blocking_writer, - guard, + _guard, disable_log_timestamp, log_color, - logfile_color, log_format, - logfile_format, extra_info, - dep_logs, span_fields: Arc::new(Mutex::new(HashMap::new())), } } @@ -84,16 +80,6 @@ where String::new() }; - if !self.dep_logs { - if let Some(file) = meta.file() { - if file.contains("/.cargo/") { - return; - } - } else { - return; - } - } - let mut writer = self.non_blocking_writer.clone(); let mut visitor = LogMessageExtractor { @@ -122,16 +108,10 @@ where None => "".to_string(), }; - if module.contains("discv5") { - visitor - .fields - .push(("service".to_string(), "\"discv5\"".to_string())); - } - let gray = "\x1b[90m"; let reset = "\x1b[0m"; let location = if self.extra_info { - if self.logfile_color { + if self.log_color { format!("{}{}::{}:{}{}", gray, module, file, line, reset) } else { format!("{}::{}:{}", module, file, line) @@ -164,33 +144,16 @@ where } }; - if self.dep_logs { - if self.logfile_format.as_deref() == Some("JSON") { - build_json_log_file( - &visitor, - plain_level_str, - meta, - &ctx, - &self.span_fields, - event, - &mut writer, - ); - } else { - build_log_text( - &visitor, - 
plain_level_str, - &timestamp, - &ctx, - &self.span_fields, - event, - &location, - color_level_str, - self.logfile_color, - &mut writer, - ); - } - } else if self.log_format.as_deref() == Some("JSON") { - build_json_log_stdout(&visitor, plain_level_str, &timestamp, &mut writer); + if self.log_format.as_deref() == Some("JSON") { + build_log_json( + &visitor, + plain_level_str, + meta, + &ctx, + &self.span_fields, + event, + &mut writer, + ); } else { build_log_text( &visitor, @@ -300,49 +263,7 @@ impl tracing_core::field::Visit for LogMessageExtractor { } } -/// Function to filter out ascii control codes. -/// -/// This helps to keep log formatting consistent. -/// Whitespace and padding control codes are excluded. -fn is_ascii_control(character: &u8) -> bool { - matches!( - character, - b'\x00'..=b'\x08' | - b'\x0b'..=b'\x0c' | - b'\x0e'..=b'\x1f' | - b'\x7f' | - b'\x81'..=b'\x9f' - ) -} - -fn build_json_log_stdout( - visitor: &LogMessageExtractor, - plain_level_str: &str, - timestamp: &str, - writer: &mut impl Write, -) { - let mut log_map = Map::new(); - log_map.insert("msg".to_string(), Value::String(visitor.message.clone())); - log_map.insert( - "level".to_string(), - Value::String(plain_level_str.to_string()), - ); - log_map.insert("ts".to_string(), Value::String(timestamp.to_string())); - - for (key, val) in visitor.fields.clone().into_iter() { - let parsed_val = parse_field(&val); - log_map.insert(key, parsed_val); - } - - let json_obj = Value::Object(log_map); - let output = format!("{}\n", json_obj); - - if let Err(e) = writer.write_all(output.as_bytes()) { - eprintln!("Failed to write log: {}", e); - } -} - -fn build_json_log_file<'a, S>( +fn build_log_json<'a, S>( visitor: &LogMessageExtractor, plain_level_str: &str, meta: &tracing::Metadata<'_>, @@ -450,13 +371,18 @@ fn build_log_text<'a, S>( } } - let level_str = if use_color { - color_level_str + let pad = if plain_level_str.len() < ALIGNED_LEVEL_WIDTH { + " " } else { - plain_level_str + "" + }; + + let 
level_str = if use_color { + format!("{}{}", color_level_str, pad) + } else { + format!("{}{}", plain_level_str, pad) }; - let fixed_message_width = 44; let message_len = visitor.message.len(); let message_content = if use_color { @@ -465,7 +391,7 @@ fn build_log_text<'a, S>( visitor.message.clone() }; - let padded_message = if message_len < fixed_message_width { + let padded_message = if message_len < FIXED_MESSAGE_WIDTH { let extra_color_len = if use_color { bold_start.len() + bold_end.len() } else { @@ -474,7 +400,7 @@ fn build_log_text<'a, S>( format!( "{: Result bool + Clone>, String> { + let workspace_crates: HashSet<&str> = WORKSPACE_CRATES.iter().copied().collect(); + + Ok(tracing_subscriber::filter::FilterFn::new(move |metadata| { + let target_crate = metadata.target().split("::").next().unwrap_or(""); + workspace_crates.contains(target_crate) + })) +} + +/// Function to filter out ascii control codes. +/// +/// This helps to keep log formatting consistent. +/// Whitespace and padding control codes are excluded. 
+pub fn is_ascii_control(character: &u8) -> bool { + matches!( + character, + b'\x00'..=b'\x08' | + b'\x0b'..=b'\x0c' | + b'\x0e'..=b'\x1f' | + b'\x7f' | + b'\x81'..=b'\x9f' + ) +} diff --git a/common/workspace_members/Cargo.toml b/common/workspace_members/Cargo.toml new file mode 100644 index 0000000000..05924590e3 --- /dev/null +++ b/common/workspace_members/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "workspace_members" +version = "0.1.0" +edition = { workspace = true } + +[lib] +proc-macro = true + +[dependencies] +cargo_metadata = { workspace = true } +quote = { workspace = true } diff --git a/common/workspace_members/src/lib.rs b/common/workspace_members/src/lib.rs new file mode 100644 index 0000000000..1eea0e60e2 --- /dev/null +++ b/common/workspace_members/src/lib.rs @@ -0,0 +1,39 @@ +use cargo_metadata::MetadataCommand; +use proc_macro::TokenStream; +use quote::quote; +use std::error::Error; + +fn get_workspace_crates() -> Result, Box> { + let metadata = MetadataCommand::new().no_deps().exec()?; + + Ok(metadata + .workspace_members + .iter() + .filter_map(|member_id| { + metadata + .packages + .iter() + .find(|package| &package.id == member_id) + .map(|package| package.name.clone()) + }) + .collect()) +} + +#[proc_macro] +pub fn workspace_crates(_input: TokenStream) -> TokenStream { + match get_workspace_crates() { + Ok(crate_names) => { + let crate_strs = crate_names.iter().map(|s| s.as_str()); + quote! { + &[#(#crate_strs),*] + } + } + Err(e) => { + let msg = format!("Failed to get workspace crates: {e}"); + quote! { + compile_error!(#msg); + } + } + } + .into() +} diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 5d0bee4c85..cbae54bd36 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -760,7 +760,7 @@ impl ProtoArray { /// /// - The child is already the best child but it's now invalid due to a FFG change and should be removed. 
/// - The child is already the best child and the parent is updated with the new - /// best-descendant. + /// best-descendant. /// - The child is not the best child but becomes the best child. /// - The child is not the best child and does not become the best child. fn maybe_update_best_child_and_descendant( @@ -1041,6 +1041,21 @@ impl ProtoArray { }) .map(|node| node.root) } + + /// Returns all nodes that have zero children and are descended from the finalized checkpoint. + /// + /// For informational purposes like the beacon HTTP API, we use this as the list of known heads, + /// even though some of them might not be viable. We do this to maintain consistency between the + /// definition of "head" used by pruning (which does not consider viability) and fork choice. + pub fn heads_descended_from_finalization(&self) -> Vec<&ProtoNode> { + self.nodes + .iter() + .filter(|node| { + node.best_child.is_none() + && self.is_finalized_checkpoint_or_descendant::(node.root) + }) + .collect() + } } /// A helper method to calculate the proposer boost based on the given `justified_balances`. 
diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 88d4660311..dde2411787 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -856,10 +856,18 @@ impl ProtoArrayForkChoice { } /// See `ProtoArray::iter_nodes` - pub fn iter_nodes<'a>(&'a self, block_root: &Hash256) -> Iter<'a> { + pub fn iter_nodes(&self, block_root: &Hash256) -> Iter { self.proto_array.iter_nodes(block_root) } + /// See `ProtoArray::iter_block_roots` + pub fn iter_block_roots( + &self, + block_root: &Hash256, + ) -> impl Iterator + use<'_> { + self.proto_array.iter_block_roots(block_root) + } + pub fn as_bytes(&self) -> Vec { SszContainer::from(self).as_ssz_bytes() } @@ -885,6 +893,11 @@ impl ProtoArrayForkChoice { pub fn core_proto_array_mut(&mut self) -> &mut ProtoArray { &mut self.proto_array } + + /// Returns all nodes that have zero children and are descended from the finalized checkpoint. + pub fn heads_descended_from_finalization(&self) -> Vec<&ProtoNode> { + self.proto_array.heads_descended_from_finalization::() + } } /// Returns a list of `deltas`, where there is one delta for each of the indices in @@ -1121,7 +1134,7 @@ mod test_compute_deltas { /// /// - `A` (slot 31) is the common descendant. /// - `B` (slot 33) descends from `A`, but there is a single skip slot - /// between it and `A`. + /// between it and `A`. /// - `C` (slot 32) descends from `A` and conflicts with `B`. /// /// Imagine that the `B` chain is finalized at epoch 1. This means that the diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 10723ecc51..8e62427ef1 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -123,8 +123,7 @@ pub fn initialize_beacon_state_from_eth1( // Remove intermediate Deneb fork from `state.fork`. 
state.fork_mut().previous_version = spec.electra_fork_version; - // TODO(electra): think about this more and determine the best way to - // do this. The spec tests will expect that the sync committees are + // The spec tests will expect that the sync committees are // calculated using the electra value for MAX_EFFECTIVE_BALANCE when // calling `initialize_beacon_state_from_eth1()`. But the sync committees // are actually calcuated back in `upgrade_to_altair()`. We need to diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 0b399bea6c..6b4a394c73 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -63,7 +63,7 @@ pub fn verify_attestation_for_state<'ctxt, E: EthSpec>( ) -> Result> { let data = attestation.data(); - // TODO(electra) choosing a validation based on the attestation's fork + // NOTE: choosing a validation based on the attestation's fork // rather than the state's fork makes this simple, but technically the spec // defines this verification based on the state's fork. 
match attestation { diff --git a/consensus/types/presets/mainnet/electra.yaml b/consensus/types/presets/mainnet/electra.yaml index 42afbb233e..55308d5b1c 100644 --- a/consensus/types/presets/mainnet/electra.yaml +++ b/consensus/types/presets/mainnet/electra.yaml @@ -7,44 +7,44 @@ MIN_ACTIVATION_BALANCE: 32000000000 # 2**11 * 10**9 (= 2,048,000,000,000) Gwei MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 -# State list lengths +# Rewards and penalties # --------------------------------------------------------------- -# `uint64(2**27)` (= 134,217,728) -PENDING_DEPOSITS_LIMIT: 134217728 -# `uint64(2**27)` (= 134,217,728) -PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 -# `uint64(2**18)` (= 262,144) -PENDING_CONSOLIDATIONS_LIMIT: 262144 - -# Reward and penalty quotients -# --------------------------------------------------------------- -# `uint64(2**12)` (= 4,096) +# 2**12 (= 4,096) MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: 4096 -# `uint64(2**12)` (= 4,096) +# 2**12 (= 4,096) WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 -# # Max operations per block +# State list lengths # --------------------------------------------------------------- -# `uint64(2**0)` (= 1) +# 2**27 (= 134,217,728) pending deposits +PENDING_DEPOSITS_LIMIT: 134217728 +# 2**27 (= 134,217,728) pending partial withdrawals +PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 +# 2**18 (= 262,144) pending consolidations +PENDING_CONSOLIDATIONS_LIMIT: 262144 + +# Max operations per block +# --------------------------------------------------------------- +# 2**0 (= 1) attester slashings MAX_ATTESTER_SLASHINGS_ELECTRA: 1 -# `uint64(2**3)` (= 8) +# 2**3 (= 8) attestations MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**1)` (= 2) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- -# 2**13 (= 8192) deposit requests +# 2**13 (= 8,192) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 +# 
2**1 (= 2) consolidation requests +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Withdrawals processing # --------------------------------------------------------------- -# 2**3 ( = 8) pending withdrawals +# 2**3 (= 8) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 # Pending deposits processing # --------------------------------------------------------------- -# 2**4 ( = 4) pending deposits +# 2**4 (= 16) pending deposits MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/minimal/electra.yaml b/consensus/types/presets/minimal/electra.yaml index 44e4769756..f99effe0f1 100644 --- a/consensus/types/presets/minimal/electra.yaml +++ b/consensus/types/presets/minimal/electra.yaml @@ -7,44 +7,44 @@ MIN_ACTIVATION_BALANCE: 32000000000 # 2**11 * 10**9 (= 2,048,000,000,000) Gwei MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 -# State list lengths +# Rewards and penalties # --------------------------------------------------------------- -# `uint64(2**27)` (= 134,217,728) -PENDING_DEPOSITS_LIMIT: 134217728 -# [customized] `uint64(2**6)` (= 64) -PENDING_PARTIAL_WITHDRAWALS_LIMIT: 64 -# [customized] `uint64(2**6)` (= 64) -PENDING_CONSOLIDATIONS_LIMIT: 64 - -# Reward and penalty quotients -# --------------------------------------------------------------- -# `uint64(2**12)` (= 4,096) +# 2**12 (= 4,096) MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: 4096 -# `uint64(2**12)` (= 4,096) +# 2**12 (= 4,096) WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 -# # Max operations per block +# State list lengths # --------------------------------------------------------------- -# `uint64(2**0)` (= 1) +# 2**27 (= 134,217,728) pending deposits +PENDING_DEPOSITS_LIMIT: 134217728 +# [customized] 2**6 (= 64) pending partial withdrawals +PENDING_PARTIAL_WITHDRAWALS_LIMIT: 64 +# [customized] 2**6 (= 64) pending consolidations +PENDING_CONSOLIDATIONS_LIMIT: 64 + +# Max operations per block +# --------------------------------------------------------------- +# 2**0 (= 1) attester 
slashings MAX_ATTESTER_SLASHINGS_ELECTRA: 1 -# `uint64(2**3)` (= 8) +# 2**3 (= 8) attestations MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**1)` (= 2) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- -# [customized] +# [customized] 2**2 (= 4) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 4 # [customized] 2**1 (= 2) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 2 +# 2**1 (= 2) consolidation requests +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Withdrawals processing # --------------------------------------------------------------- -# 2**1 ( = 2) pending withdrawals +# 2**1 (= 2) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 2 # Pending deposits processing # --------------------------------------------------------------- -# 2**4 ( = 4) pending deposits +# 2**4 (= 16) pending deposits MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 5d147f1e86..e769057182 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -5,6 +5,7 @@ use derivative::Derivative; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::BitVector; +use std::collections::HashSet; use std::hash::{Hash, Hasher}; use superstruct::superstruct; use test_random_derive::TestRandom; @@ -210,6 +211,13 @@ impl Attestation { } } + pub fn get_committee_indices_map(&self) -> HashSet { + match self { + Attestation::Base(att) => HashSet::from([att.data.index]), + Attestation::Electra(att) => att.get_committee_indices().into_iter().collect(), + } + } + pub fn is_aggregation_bits_zero(&self) -> bool { match self { Attestation::Base(att) => att.aggregation_bits.is_zero(), @@ -293,7 +301,11 @@ impl AttestationRef<'_, E> { impl AttestationElectra { pub fn committee_index(&self) -> Option { - self.get_committee_indices().first().cloned() + self.committee_bits + .iter() + 
.enumerate() + .find(|&(_, bit)| bit) + .map(|(index, _)| index as u64) } pub fn get_aggregation_bits(&self) -> Vec { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 1650001db6..2b29ef1f10 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -210,10 +210,9 @@ pub struct ChainSpec { pub boot_nodes: Vec, pub network_id: u8, pub target_aggregators_per_committee: u64, - pub gossip_max_size: u64, + pub max_payload_size: u64, max_request_blocks: u64, pub min_epochs_for_block_requests: u64, - pub max_chunk_size: u64, pub ttfb_timeout: u64, pub resp_timeout: u64, pub attestation_propagation_slot_range: u64, @@ -241,6 +240,11 @@ pub struct ChainSpec { blob_sidecar_subnet_count_electra: u64, max_request_blob_sidecars_electra: u64, + /* + * Networking Fulu + */ + max_blobs_per_block_fulu: u64, + /* * Networking Derived * @@ -656,7 +660,9 @@ impl ChainSpec { /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 { - if fork_name.electra_enabled() { + if fork_name.fulu_enabled() { + self.max_blobs_per_block_fulu + } else if fork_name.electra_enabled() { self.max_blobs_per_block_electra } else { self.max_blobs_per_block @@ -716,6 +722,35 @@ impl ChainSpec { (0..self.data_column_sidecar_subnet_count).map(DataColumnSubnetId::new) } + /// Worst-case compressed length for a given payload of size n when using snappy. + /// + /// https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 + /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#max_compressed_len + fn max_compressed_len_snappy(n: usize) -> Option { + 32_usize.checked_add(n)?.checked_add(n / 6) + } + + /// Max compressed length of a message that we receive over gossip. 
+ pub fn max_compressed_len(&self) -> usize { + Self::max_compressed_len_snappy(self.max_payload_size as usize) + .expect("should not overflow") + } + + /// Max allowed size of a raw, compressed message received over the network. + /// + /// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#max_compressed_len + pub fn max_message_size(&self) -> usize { + std::cmp::max( + // 1024 to account for framing + encoding overhead + Self::max_compressed_len_snappy(self.max_payload_size as usize) + .expect("should not overflow") + .safe_add(1024) + .expect("should not overflow"), + //1MB + 1024 * 1024, + ) + } + /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. pub fn mainnet() -> Self { Self { @@ -883,7 +918,7 @@ impl ChainSpec { * Electra hard fork params */ electra_fork_version: [0x05, 00, 00, 00], - electra_fork_epoch: None, + electra_fork_epoch: Some(Epoch::new(364032)), unset_deposit_requests_start_index: u64::MAX, full_exit_request_amount: 0, min_activation_balance: option_wrapper(|| { @@ -930,9 +965,8 @@ impl ChainSpec { subnets_per_node: 2, maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - gossip_max_size: default_gossip_max_size(), + max_payload_size: default_max_payload_size(), min_epochs_for_block_requests: default_min_epochs_for_block_requests(), - max_chunk_size: default_max_chunk_size(), ttfb_timeout: default_ttfb_timeout(), resp_timeout: default_resp_timeout(), message_domain_invalid_snappy: default_message_domain_invalid_snappy(), @@ -965,6 +999,11 @@ impl ChainSpec { blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(), max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(), + /* + * Networking Fulu specific + */ + max_blobs_per_block_fulu: default_max_blobs_per_block_fulu(), + /* * Application specific */ @@ -1213,7 +1252,7 @@ impl ChainSpec { * Electra hard fork 
params */ electra_fork_version: [0x05, 0x00, 0x00, 0x64], - electra_fork_epoch: None, + electra_fork_epoch: Some(Epoch::new(1337856)), unset_deposit_requests_start_index: u64::MAX, full_exit_request_amount: 0, min_activation_balance: option_wrapper(|| { @@ -1235,7 +1274,7 @@ impl ChainSpec { }) .expect("calculation does not overflow"), max_per_epoch_activation_exit_churn_limit: option_wrapper(|| { - u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) + u64::checked_pow(2, 6)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), @@ -1260,9 +1299,8 @@ impl ChainSpec { subnets_per_node: 4, // Make this larger than usual to avoid network damage maximum_gossip_clock_disparity_millis: default_maximum_gossip_clock_disparity_millis(), target_aggregators_per_committee: 16, - gossip_max_size: default_gossip_max_size(), + max_payload_size: default_max_payload_size(), min_epochs_for_block_requests: 33024, - max_chunk_size: default_max_chunk_size(), ttfb_timeout: default_ttfb_timeout(), resp_timeout: default_resp_timeout(), message_domain_invalid_snappy: default_message_domain_invalid_snappy(), @@ -1278,7 +1316,7 @@ impl ChainSpec { max_request_data_column_sidecars: default_max_request_data_column_sidecars(), min_epochs_for_blob_sidecars_requests: 16384, blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), - max_blobs_per_block: default_max_blobs_per_block(), + max_blobs_per_block: 2, /* * Derived Deneb Specific @@ -1291,9 +1329,14 @@ impl ChainSpec { /* * Networking Electra specific */ - max_blobs_per_block_electra: default_max_blobs_per_block_electra(), - blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(), - max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(), + max_blobs_per_block_electra: 2, + blob_sidecar_subnet_count_electra: 2, + max_request_blob_sidecars_electra: 256, + + /* + * Networking Fulu specific + */ + max_blobs_per_block_fulu: 
default_max_blobs_per_block_fulu(), /* * Application specific @@ -1434,18 +1477,15 @@ pub struct Config { #[serde(with = "serde_utils::quoted_u64")] gas_limit_adjustment_factor: u64, - #[serde(default = "default_gossip_max_size")] + #[serde(default = "default_max_payload_size")] #[serde(with = "serde_utils::quoted_u64")] - gossip_max_size: u64, + max_payload_size: u64, #[serde(default = "default_max_request_blocks")] #[serde(with = "serde_utils::quoted_u64")] max_request_blocks: u64, #[serde(default = "default_min_epochs_for_block_requests")] #[serde(with = "serde_utils::quoted_u64")] min_epochs_for_block_requests: u64, - #[serde(default = "default_max_chunk_size")] - #[serde(with = "serde_utils::quoted_u64")] - max_chunk_size: u64, #[serde(default = "default_ttfb_timeout")] #[serde(with = "serde_utils::quoted_u64")] ttfb_timeout: u64, @@ -1517,6 +1557,9 @@ pub struct Config { #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] custody_requirement: u64, + #[serde(default = "default_max_blobs_per_block_fulu")] + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block_fulu: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1580,7 +1623,7 @@ const fn default_gas_limit_adjustment_factor() -> u64 { 1024 } -const fn default_gossip_max_size() -> u64 { +const fn default_max_payload_size() -> u64 { 10485760 } @@ -1588,10 +1631,6 @@ const fn default_min_epochs_for_block_requests() -> u64 { 33024 } -const fn default_max_chunk_size() -> u64 { - 10485760 -} - const fn default_ttfb_timeout() -> u64 { 5 } @@ -1658,6 +1697,10 @@ const fn default_max_blobs_per_block_electra() -> u64 { 9 } +const fn default_max_blobs_per_block_fulu() -> u64 { + 12 +} + const fn default_attestation_propagation_slot_range() -> u64 { 32 } @@ -1857,10 +1900,9 @@ impl Config { gas_limit_adjustment_factor: spec.gas_limit_adjustment_factor, - gossip_max_size: spec.gossip_max_size, + max_payload_size: spec.max_payload_size, max_request_blocks: 
spec.max_request_blocks, min_epochs_for_block_requests: spec.min_epochs_for_block_requests, - max_chunk_size: spec.max_chunk_size, ttfb_timeout: spec.ttfb_timeout, resp_timeout: spec.resp_timeout, attestation_propagation_slot_range: spec.attestation_propagation_slot_range, @@ -1886,6 +1928,7 @@ impl Config { data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, samples_per_slot: spec.samples_per_slot, custody_requirement: spec.custody_requirement, + max_blobs_per_block_fulu: spec.max_blobs_per_block_fulu, } } @@ -1938,9 +1981,8 @@ impl Config { deposit_network_id, deposit_contract_address, gas_limit_adjustment_factor, - gossip_max_size, + max_payload_size, min_epochs_for_block_requests, - max_chunk_size, ttfb_timeout, resp_timeout, message_domain_invalid_snappy, @@ -1965,6 +2007,7 @@ impl Config { data_column_sidecar_subnet_count, samples_per_slot, custody_requirement, + max_blobs_per_block_fulu, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -2009,9 +2052,8 @@ impl Config { terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, - gossip_max_size, + max_payload_size, min_epochs_for_block_requests, - max_chunk_size, ttfb_timeout, resp_timeout, message_domain_invalid_snappy, @@ -2048,6 +2090,7 @@ impl Config { data_column_sidecar_subnet_count, samples_per_slot, custody_requirement, + max_blobs_per_block_fulu, ..chain_spec.clone() }) @@ -2311,9 +2354,8 @@ mod yaml_tests { check_default!(terminal_block_hash); check_default!(terminal_block_hash_activation_epoch); check_default!(bellatrix_fork_version); - check_default!(gossip_max_size); + check_default!(max_payload_size); check_default!(min_epochs_for_block_requests); - check_default!(max_chunk_size); check_default!(ttfb_timeout); check_default!(resp_timeout); check_default!(message_domain_invalid_snappy); @@ -2339,4 +2381,17 @@ mod yaml_tests { [0, 0, 0, 1] ); } + + #[test] + fn test_max_network_limits_overflow() { + let mut spec = 
MainnetEthSpec::default_spec(); + // Should not overflow + let _ = spec.max_message_size(); + let _ = spec.max_compressed_len(); + + spec.max_payload_size *= 10; + // Should not overflow even with a 10x increase in max + let _ = spec.max_message_size(); + let _ = spec.max_compressed_len(); + } } diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 90a914dfae..03ab6a74f8 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,7 +1,7 @@ use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; use crate::test_utils::TestRandom; use crate::BeaconStateError; -use crate::{BeaconBlockHeader, Epoch, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; +use crate::{BeaconBlockHeader, Epoch, EthSpec, Hash256, SignedBeaconBlockHeader, Slot}; use bls::Signature; use derivative::Derivative; use kzg::Error as KzgError; @@ -56,7 +56,7 @@ pub struct DataColumnSidecar { pub column: DataColumn, /// All the KZG commitments and proofs associated with the block, used for verifying sample cells. pub kzg_commitments: KzgCommitments, - pub kzg_proofs: KzgProofs, + pub kzg_proofs: VariableList, pub signed_block_header: SignedBeaconBlockHeader, /// An inclusion proof, proving the inclusion of `blob_kzg_commitments` in `BeaconBlockBody`. 
pub kzg_commitments_inclusion_proof: FixedVector, diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 0bc074072f..6f1b3e6ce6 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -4,8 +4,8 @@ use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ bit::B0, UInt, U0, U1, U10, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, - U134217728, U16, U16777216, U17, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, - U65536, U8, U8192, + U134217728, U16, U16777216, U17, U2, U2048, U256, U262144, U32, U33554432, U4, U4096, U512, + U625, U64, U65536, U8, U8192, }; use std::fmt::{self, Debug}; use std::str::FromStr; @@ -146,6 +146,11 @@ pub trait EthSpec: /// Must be set to `BytesPerFieldElement * FieldElementsPerCell`. type BytesPerCell: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /// The maximum number of cell commitments per block + /// + /// FieldElementsPerExtBlob * MaxBlobCommitmentsPerBlock + type MaxCellsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* * New in Electra */ @@ -421,6 +426,7 @@ impl EthSpec for MainnetEthSpec { type FieldElementsPerExtBlob = U8192; type BytesPerBlob = U131072; type BytesPerCell = U2048; + type MaxCellsPerBlock = U33554432; type KzgCommitmentInclusionProofDepth = U17; type KzgCommitmentsInclusionProofDepth = U4; // inclusion of the whole list of commitments type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count @@ -474,6 +480,7 @@ impl EthSpec for MinimalEthSpec { type MaxWithdrawalRequestsPerPayload = U2; type FieldElementsPerCell = U64; type FieldElementsPerExtBlob = U8192; + type MaxCellsPerBlock = U33554432; type BytesPerCell = U2048; type KzgCommitmentsInclusionProofDepth = U4; @@ -566,6 +573,7 @@ impl EthSpec for GnosisEthSpec { type MaxPendingDepositsPerEpoch = U16; type FieldElementsPerCell = U64; type FieldElementsPerExtBlob = U8192; + type 
MaxCellsPerBlock = U33554432; type BytesPerCell = U2048; type KzgCommitmentsInclusionProofDepth = U4; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 73a50b4ef3..1d39c89cab 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -272,7 +272,14 @@ pub type Address = fixed_bytes::Address; pub type ForkVersion = [u8; 4]; pub type BLSFieldElement = Uint256; pub type Blob = FixedVector::BytesPerBlob>; -pub type KzgProofs = VariableList::MaxBlobCommitmentsPerBlock>; +// Note on List limit: +// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` +// - Fulu: `MaxCellsPerBlock` +// We choose to use a single type (with the larger value from Fulu as `N`) instead of having to +// introduce a new type for Fulu. This is to avoid messy conversions and having to add extra types +// with no gains - as `N` does not impact serialisation at all, and only affects merkleization, +// which we don't currently do on `KzgProofs` anyway. +pub type KzgProofs = VariableList::MaxCellsPerBlock>; pub type VersionedHash = Hash256; pub type Hash64 = alloy_primitives::B64; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 707d2d4697..d025c72eac 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -227,28 +227,36 @@ pub struct ElectraPreset { pub min_activation_balance: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance_electra: u64, + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient_electra: u64, #[serde(with = "serde_utils::quoted_u64")] pub whistleblower_reward_quotient_electra: u64, - #[serde(with = "serde_utils::quoted_u64")] - pub max_pending_partials_per_withdrawals_sweep: u64, + #[serde(with = "serde_utils::quoted_u64")] pub pending_deposits_limit: u64, #[serde(with = "serde_utils::quoted_u64")] pub pending_partial_withdrawals_limit: u64, #[serde(with = "serde_utils::quoted_u64")] pub pending_consolidations_limit: u64, #[serde(with = 
"serde_utils::quoted_u64")] - pub max_consolidation_requests_per_payload: u64, - #[serde(with = "serde_utils::quoted_u64")] - pub max_deposit_requests_per_payload: u64, + #[serde(with = "serde_utils::quoted_u64")] pub max_attester_slashings_electra: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_attestations_electra: u64, + + #[serde(with = "serde_utils::quoted_u64")] + pub max_deposit_requests_per_payload: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_withdrawal_requests_per_payload: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub max_consolidation_requests_per_payload: u64, + + #[serde(with = "serde_utils::quoted_u64")] + pub max_pending_partials_per_withdrawals_sweep: u64, + + #[serde(with = "serde_utils::quoted_u64")] + pub max_pending_deposits_per_epoch: u64, } impl ElectraPreset { @@ -256,19 +264,26 @@ impl ElectraPreset { Self { min_activation_balance: spec.min_activation_balance, max_effective_balance_electra: spec.max_effective_balance_electra, + min_slashing_penalty_quotient_electra: spec.min_slashing_penalty_quotient_electra, whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, - max_pending_partials_per_withdrawals_sweep: spec - .max_pending_partials_per_withdrawals_sweep, + pending_deposits_limit: E::pending_deposits_limit() as u64, pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, - max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() - as u64, - max_deposit_requests_per_payload: E::max_deposit_requests_per_payload() as u64, + max_attester_slashings_electra: E::max_attester_slashings_electra() as u64, max_attestations_electra: E::max_attestations_electra() as u64, + + max_deposit_requests_per_payload: E::max_deposit_requests_per_payload() as u64, max_withdrawal_requests_per_payload: E::max_withdrawal_requests_per_payload() as u64, + 
max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() + as u64, + + max_pending_partials_per_withdrawals_sweep: spec + .max_pending_partials_per_withdrawals_sweep, + + max_pending_deposits_per_epoch: E::max_pending_deposits_per_epoch() as u64, } } } diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index 58983d26ec..e160332f45 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -42,8 +42,8 @@ impl SyncCommitteeContribution { /// /// - `message`: A single `SyncCommitteeMessage`. /// - `subcommittee_index`: The subcommittee this contribution pertains to out of the broader - /// sync committee. This can be determined from the `SyncSubnetId` of the gossip subnet - /// this message was seen on. + /// sync committee. This can be determined from the `SyncSubnetId` of the gossip subnet + /// this message was seen on. /// - `validator_sync_committee_index`: The index of the validator **within** the subcommittee. 
pub fn from_message( message: &SyncCommitteeMessage, diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index 35176d389d..e335ac7fe8 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -3,7 +3,7 @@ use smallvec::smallvec; impl TestRandom for BitList { fn random_for_test(rng: &mut impl RngCore) -> Self { - let initial_len = std::cmp::max(1, (N::to_usize() + 7) / 8); + let initial_len = std::cmp::max(1, N::to_usize().div_ceil(8)); let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); @@ -24,7 +24,7 @@ impl TestRandom for BitList { impl TestRandom for BitVector { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; + let mut raw_bytes = smallvec![0; std::cmp::max(1, N::to_usize().div_ceil(8))]; rng.fill_bytes(&mut raw_bytes); // If N isn't divisible by 8 // zero out bits greater than N diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 5aed90d2c1..027958b178 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -249,7 +249,6 @@ impl Validator { } } - /// TODO(electra): refactor these functions and make it simpler.. this is a mess /// Returns `true` if the validator is partially withdrawable. fn is_partially_withdrawable_validator_capella(&self, balance: u64, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index 13b6dc2f2c..ac2d83b204 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -10,7 +10,7 @@ //! //! - `supranational`: the pure-assembly, highly optimized version from the `blst` crate. //! - `fake_crypto`: an always-returns-valid implementation that is only useful for testing -//! scenarios which intend to *ignore* real cryptography. +//! 
scenarios which intend to *ignore* real cryptography. //! //! This crate uses traits to reduce code-duplication between the two implementations. For example, //! the `GenericPublicKey` struct exported from this crate is generic across the `TPublicKey` trait diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 2a5c6e47f5..5d752cc0a5 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -220,7 +220,7 @@ impl Kzg { .map_err(Into::into) } - /// Computes the cells and associated proofs for a given `blob` at index `index`. + /// Computes the cells and associated proofs for a given `blob`. pub fn compute_cells_and_proofs( &self, blob: KzgBlobRef<'_>, @@ -235,11 +235,14 @@ impl Kzg { Ok((cells, c_kzg_proof)) } + /// Computes the cells for a given `blob`. + pub fn compute_cells(&self, blob: KzgBlobRef<'_>) -> Result<[Cell; CELLS_PER_EXT_BLOB], Error> { + self.context() + .compute_cells(blob) + .map_err(Error::PeerDASKZG) + } + /// Verifies a batch of cell-proof-commitment triplets. - /// - /// Here, `coordinates` correspond to the (row, col) coordinate of the cell in the extended - /// blob "matrix". In the 1D extension, row corresponds to the blob index, and col corresponds - /// to the data column index. 
pub fn verify_cell_proof_batch( &self, cells: &[CellRef<'_>], diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 22b19f7413..9acbe2569c 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 3774a9c458..04c8efcdba 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "7.0.0-beta.5" +version = "7.1.0-beta.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index f427836751..9b0284e06d 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -197,6 +197,13 @@ impl EnvironmentBuilder { Ok(self) } + /// Initialize the Lighthouse-specific tracing logging components from + /// the provided config. 
+ /// + /// This consists of 3 tracing `Layers`: + /// - A `Layer` which logs to `stdout` + /// - An `Option` which logs to a log file + /// - An `Option` which emits logs to an SSE stream pub fn init_tracing( mut self, config: LoggerConfig, @@ -204,7 +211,7 @@ impl EnvironmentBuilder { ) -> ( Self, LoggingLayer, - LoggingLayer, + Option, Option, ) { let filename_prefix = match logfile_prefix { @@ -216,72 +223,48 @@ impl EnvironmentBuilder { #[cfg(target_family = "unix")] let file_mode = if config.is_restricted { 0o600 } else { 0o644 }; - let file_logging_layer = { - if let Some(path) = config.path { - let mut appender = LogRollerBuilder::new( - path.clone(), - PathBuf::from(format!("{}.log", filename_prefix)), - ) - .rotation(Rotation::SizeBased(RotationSize::MB(config.max_log_size))) - .max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| { - eprintln!("Failed to convert max_log_number to u64: {}", e); - 10 - })); + let file_logging_layer = match config.path { + None => { + eprintln!("No logfile path provided, logging to file is disabled"); + None + } + Some(_) if config.max_log_number == 0 || config.max_log_size == 0 => { + // User has explicitly disabled logging to file, so don't emit a message. 
+ None + } + Some(path) => { + let log_filename = PathBuf::from(format!("{}.log", filename_prefix)); + let mut appender = LogRollerBuilder::new(path.clone(), log_filename) + .rotation(Rotation::SizeBased(RotationSize::MB(config.max_log_size))) + .max_keep_files(config.max_log_number.try_into().unwrap_or_else(|e| { + eprintln!("Failed to convert max_log_number to u64: {}", e); + 10 + })); if config.compression { appender = appender.compression(Compression::Gzip); } + match appender.build() { Ok(file_appender) => { #[cfg(target_family = "unix")] set_logfile_permissions(&path, filename_prefix, file_mode); - let (file_non_blocking_writer, file_guard) = - tracing_appender::non_blocking(file_appender); - - LoggingLayer::new( - file_non_blocking_writer, - file_guard, + let (writer, guard) = tracing_appender::non_blocking(file_appender); + Some(LoggingLayer::new( + writer, + guard, config.disable_log_timestamp, - false, config.logfile_color, - config.log_format.clone(), config.logfile_format.clone(), config.extra_info, - false, - ) + )) } Err(e) => { eprintln!("Failed to initialize rolling file appender: {}", e); - let (sink_writer, sink_guard) = - tracing_appender::non_blocking(std::io::sink()); - LoggingLayer::new( - sink_writer, - sink_guard, - config.disable_log_timestamp, - false, - config.logfile_color, - config.log_format.clone(), - config.logfile_format.clone(), - config.extra_info, - false, - ) + None } } - } else { - eprintln!("No path provided. 
File logging is disabled."); - let (sink_writer, sink_guard) = tracing_appender::non_blocking(std::io::sink()); - LoggingLayer::new( - sink_writer, - sink_guard, - config.disable_log_timestamp, - false, - true, - config.log_format.clone(), - config.logfile_format.clone(), - config.extra_info, - false, - ) } }; @@ -293,11 +276,8 @@ impl EnvironmentBuilder { stdout_guard, config.disable_log_timestamp, config.log_color, - true, config.log_format, - config.logfile_format, config.extra_info, - false, ); let sse_logging_layer_opt = if config.sse_logging { @@ -310,8 +290,8 @@ impl EnvironmentBuilder { ( self, - file_logging_layer, stdout_logging_layer, + file_logging_layer, sse_logging_layer_opt, ) } diff --git a/lighthouse/environment/src/tracing_common.rs b/lighthouse/environment/src/tracing_common.rs index 893f50dae5..dd9fe45cad 100644 --- a/lighthouse/environment/src/tracing_common.rs +++ b/lighthouse/environment/src/tracing_common.rs @@ -1,47 +1,67 @@ use crate::{EnvironmentBuilder, LoggerConfig}; use clap::ArgMatches; use logging::Libp2pDiscv5TracingLayer; -use logging::{tracing_logging_layer::LoggingLayer, SSELoggingComponents}; +use logging::{ + create_libp2p_discv5_tracing_layer, tracing_logging_layer::LoggingLayer, SSELoggingComponents, +}; use std::process; -use tracing_subscriber::filter::{FilterFn, LevelFilter}; + +use tracing_subscriber::filter::LevelFilter; use types::EthSpec; +/// Constructs all logging layers including both Lighthouse-specific and +/// dependency logging. +/// +/// The `Layer`s are as follows: +/// - A `Layer` which logs to `stdout` +/// - An `Option` which logs to a log file +/// - An `Option` which emits logs to an SSE stream +/// - An `Option` which logs relevant dependencies to their +/// own log files. 
(Currently only `libp2p` and `discv5`) pub fn construct_logger( logger_config: LoggerConfig, matches: &ArgMatches, environment_builder: EnvironmentBuilder, ) -> ( EnvironmentBuilder, - Libp2pDiscv5TracingLayer, - LoggingLayer, - LoggingLayer, - Option, LoggerConfig, - FilterFn, + LoggingLayer, + Option, + Option, + Option, ) { - let libp2p_discv5_layer = logging::create_libp2p_discv5_tracing_layer( - logger_config.path.clone(), - logger_config.max_log_size, - logger_config.compression, - logger_config.max_log_number, - ); + let subcommand_name = matches.subcommand_name(); + let logfile_prefix = subcommand_name.unwrap_or("lighthouse"); - let logfile_prefix = matches.subcommand_name().unwrap_or("lighthouse"); - - let (builder, file_logging_layer, stdout_logging_layer, sse_logging_layer_opt) = + let (builder, stdout_logging_layer, file_logging_layer, sse_logging_layer_opt) = environment_builder.init_tracing(logger_config.clone(), logfile_prefix); - let dependency_log_filter = - FilterFn::new(filter_dependency_log as fn(&tracing::Metadata<'_>) -> bool); + let libp2p_discv5_layer = if let Some(subcommand_name) = subcommand_name { + if subcommand_name == "beacon_node" || subcommand_name == "boot_node" { + if logger_config.max_log_size == 0 || logger_config.max_log_number == 0 { + // User has explicitly disabled logging to file. + None + } else { + create_libp2p_discv5_tracing_layer( + logger_config.path.clone(), + logger_config.max_log_size, + ) + } + } else { + // Disable libp2p and discv5 logs when running other subcommands. 
+ None + } + } else { + None + }; ( builder, - libp2p_discv5_layer, - file_logging_layer, - stdout_logging_layer, - sse_logging_layer_opt, logger_config, - dependency_log_filter, + stdout_logging_layer, + file_logging_layer, + sse_logging_layer_opt, + libp2p_discv5_layer, ) } @@ -58,15 +78,3 @@ pub fn parse_level(level: &str) -> LevelFilter { } } } - -fn filter_dependency_log(meta: &tracing::Metadata<'_>) -> bool { - if let Some(file) = meta.file() { - let target = meta.target(); - if file.contains("/.cargo/") { - return target.contains("discv5") || target.contains("libp2p"); - } else { - return !file.contains("gossipsub") && !target.contains("hyper"); - } - } - true -} diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 34e42a61f6..3f72e2ea6c 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -87,9 +87,8 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Network # --------------------------------------------------------------- SUBNETS_PER_NODE: 2 -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -MAX_CHUNK_SIZE: 10485760 TTFB_TIMEOUT: 5 RESP_TIMEOUT: 10 MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index a6ab1cfb6b..7ddf04db01 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -17,17 +17,16 @@ use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODE use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; use lighthouse_version::VERSION; -use logging::crit; -use logging::MetricsLayer; +use logging::{build_workspace_filter, crit, MetricsLayer}; use malloc_utils::configure_memory_allocator; use std::backtrace::Backtrace; +use std::io::IsTerminal; use std::path::PathBuf; use std::process::exit; use std::sync::LazyLock; use 
task_executor::ShutdownReason; -use tracing::{info, warn}; -use tracing_subscriber::prelude::*; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; +use tracing::{info, warn, Level}; +use tracing_subscriber::{filter::EnvFilter, layer::SubscriberExt, util::SubscriberInitExt, Layer}; use types::{EthSpec, EthSpecId}; use validator_client::ProductionValidatorClient; @@ -69,6 +68,9 @@ fn bls_hardware_acceleration() -> bool { #[cfg(target_arch = "aarch64")] return std::arch::is_aarch64_feature_detected!("neon"); + + #[cfg(target_arch = "riscv64")] + return false; } fn allocator_name() -> String { @@ -523,10 +525,15 @@ fn run( let log_format = matches.get_one::("log-format"); - let log_color = matches - .get_one::("log-color") - .copied() - .unwrap_or(true); + let log_color = if std::io::stdin().is_terminal() { + matches + .get_one::("log-color") + .copied() + .unwrap_or(true) + } else { + // Disable color when in non-interactive mode. + false + }; let logfile_color = matches.get_flag("logfile-color"); @@ -592,12 +599,11 @@ fn run( let ( builder, - libp2p_discv5_layer, - file_logging_layer, - stdout_logging_layer, - sse_logging_layer_opt, logger_config, - dependency_log_filter, + stdout_logging_layer, + file_logging_layer, + sse_logging_layer_opt, + libp2p_discv5_layer, ) = tracing_common::construct_logger( LoggerConfig { path: log_path.clone(), @@ -619,21 +625,50 @@ fn run( environment_builder, ); - let logging = tracing_subscriber::registry() - .with(dependency_log_filter) - .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) - .with(stdout_logging_layer.with_filter(logger_config.debug_level)) - .with(MetricsLayer) - .with(libp2p_discv5_layer); + let workspace_filter = build_workspace_filter()?; - let logging_result = if let Some(sse_logging_layer) = sse_logging_layer_opt { - logging.with(sse_logging_layer).try_init() - } else { - logging.try_init() - }; + let mut logging_layers = Vec::new(); + + logging_layers.push( + 
stdout_logging_layer + .with_filter(logger_config.debug_level) + .with_filter(workspace_filter.clone()) + .boxed(), + ); + + if let Some(file_logging_layer) = file_logging_layer { + logging_layers.push( + file_logging_layer + .with_filter(logger_config.logfile_debug_level) + .with_filter(workspace_filter) + .boxed(), + ); + } + + if let Some(sse_logging_layer) = sse_logging_layer_opt { + logging_layers.push(sse_logging_layer.boxed()); + } + + if let Some(libp2p_discv5_layer) = libp2p_discv5_layer { + logging_layers.push( + libp2p_discv5_layer + .with_filter( + EnvFilter::builder() + .with_default_directive(Level::DEBUG.into()) + .from_env_lossy(), + ) + .boxed(), + ); + } + + logging_layers.push(MetricsLayer.boxed()); + + let logging_result = tracing_subscriber::registry() + .with(logging_layers) + .try_init(); if let Err(e) = logging_result { - eprintln!("Failed to initialize dependency logging: {e}"); + eprintln!("Failed to initialize logger: {e}"); } let mut environment = builder diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index eccd97d486..b9edeceaee 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -70,6 +70,22 @@ fn validators_and_secrets_dir_flags() { }); } +#[test] +fn datadir_and_secrets_dir_flags() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("datadir", dir.path().join("data").to_str()) + .flag("secrets-dir", dir.path().join("secrets").to_str()) + .run_with_no_datadir() + .with_config(|config| { + assert_eq!( + config.validator_dir, + dir.path().join("data").join("validators") + ); + assert_eq!(config.secrets_dir, dir.path().join("secrets")); + }); +} + #[test] fn validators_dir_alias_flags() { let dir = TempDir::new().expect("Unable to create temporary directory"); diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml index 80b4bc95c6..d47dfa6b5a 
100644 --- a/scripts/local_testnet/network_params_das.yaml +++ b/scripts/local_testnet/network_params_das.yaml @@ -1,6 +1,7 @@ participants: - cl_type: lighthouse cl_image: lighthouse:local + el_image: ethpandaops/geth:engine-getblobs-v2-3676b56 cl_extra_params: - --subscribe-all-data-column-subnets - --subscribe-all-subnets @@ -10,6 +11,7 @@ participants: count: 2 - cl_type: lighthouse cl_image: lighthouse:local + el_image: ethpandaops/geth:engine-getblobs-v2-3676b56 cl_extra_params: # Note: useful for testing range sync (only produce block if node is in sync to prevent forking) - --sync-tolerance-epochs=0 @@ -19,6 +21,10 @@ network_params: electra_fork_epoch: 1 fulu_fork_epoch: 2 seconds_per_slot: 6 + max_blobs_per_block_electra: 64 + target_blobs_per_block_electra: 48 + max_blobs_per_block_fulu: 64 + target_blobs_per_block_fulu: 48 snooper_enabled: false global_log_level: debug additional_services: @@ -26,4 +32,8 @@ additional_services: - spamoor_blob - prometheus_grafana dora_params: - image: ethpandaops/dora:fulu-support \ No newline at end of file + image: ethpandaops/dora:fulu-support +spamoor_blob_params: + # Throughput of spamoor + # Defaults to 3 + throughput: 32 \ No newline at end of file diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index c32a670e9a..c3a56ec11a 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-beta.2 +TESTS_TAG := v1.5.0-beta.4 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 4e744b797a..3aeff8ce06 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -50,6 +50,8 @@ excluded_paths = [ # TODO(das): Fulu tests are ignored for now "tests/.*/fulu", "tests/.*/fulu/ssz_static/MatrixEntry", + "tests/.*/eip7441", + "tests/.*/eip7732", ] diff --git 
a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 4a202ee3d2..31662e831a 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -84,11 +84,11 @@ pub use transition::TransitionTest; /// /// The feature tests can be run with one of the following methods: /// 1. `handler.run_for_feature(feature_name)` for new tests that are not on existing fork, i.e. a -/// new handler. This will be temporary and the test will need to be updated to use -/// `handle.run()` once the feature is incorporated into a fork. +/// new handler. This will be temporary and the test will need to be updated to use +/// `handle.run()` once the feature is incorporated into a fork. /// 2. `handler.run()` for tests that are already on existing forks, but with new test vectors for -/// the feature. In this case the `handler.is_enabled_for_feature` will need to be implemented -/// to return `true` for the feature in order for the feature test vector to be tested. +/// the feature. In this case the `handler.is_enabled_for_feature` will need to be implemented +/// to return `true` for the feature in order for the feature test vector to be tested. #[derive(Debug, PartialEq, Clone, Copy)] pub enum FeatureName { // TODO(fulu): to be removed once we start using Fulu types for test vectors. 
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 01c87b40fc..b507383190 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -3,6 +3,7 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use ::fork_choice::{PayloadVerificationStatus, ProposerHeadError}; use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head; use beacon_chain::blob_verification::GossipBlobError; +use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::chain_config::{ DisallowedReOrgOffsets, DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_PARENT_THRESHOLD, @@ -143,7 +144,7 @@ impl LoadCase for ForkChoiceTest { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let description = path .iter() - .last() + .next_back() .expect("path must be non-empty") .to_str() .expect("path must be valid OsStr") @@ -519,7 +520,7 @@ impl Tester { let result: Result, _> = self .block_on_dangerous(self.harness.chain.process_block( block_root, - block.clone(), + RpcBlock::new_without_blobs(Some(block_root), block.clone(), 0), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 28ff944799..55c42eb9d3 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -7,7 +7,9 @@ edition = { workspace = true } async-channel = { workspace = true } deposit_contract = { workspace = true } ethers-core = { workspace = true } +ethers-middleware = { workspace = true } ethers-providers = { workspace = true } +ethers-signers = { workspace = true } execution_layer = { workspace = true } fork_choice = { workspace = true } futures = { workspace = true } diff --git a/testing/execution_engine_integration/src/geth.rs 
b/testing/execution_engine_integration/src/geth.rs index ea143ed433..8c39fda4e3 100644 --- a/testing/execution_engine_integration/src/geth.rs +++ b/testing/execution_engine_integration/src/geth.rs @@ -7,10 +7,7 @@ use std::{env, fs}; use tempfile::TempDir; use unused_port::unused_tcp4_port; -// This is not currently used due to the following breaking changes in geth that requires updating our tests: -// 1. removal of `personal` namespace in v1.14.12: See #30704 -// 2. removal of `totalDifficulty` field from RPC in v1.14.11. See #30386. -// const GETH_BRANCH: &str = "master"; +const GETH_BRANCH: &str = "master"; const GETH_REPO_URL: &str = "https://github.com/ethereum/go-ethereum"; pub fn build_result(repo_dir: &Path) -> Output { @@ -30,14 +27,12 @@ pub fn build(execution_clients_dir: &Path) { } // Get the latest tag on the branch - // let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); - // Using an older release due to breaking changes in recent releases. See comment on `GETH_BRANCH` const. 
- let release_tag = "v1.14.10"; - build_utils::checkout(&repo_dir, dbg!(release_tag)).unwrap(); + let last_release = build_utils::get_latest_release(&repo_dir, GETH_BRANCH).unwrap(); + build_utils::checkout(&repo_dir, dbg!(&last_release)).unwrap(); // Build geth build_utils::check_command_output(build_result(&repo_dir), || { - format!("geth make failed using release {release_tag}") + format!("geth make failed using release {last_release}") }); } @@ -102,7 +97,7 @@ impl GenericExecutionEngine for GethEngine { .arg(datadir.path().to_str().unwrap()) .arg("--http") .arg("--http.api") - .arg("engine,eth,personal") + .arg("engine,eth") .arg("--http.port") .arg(http_port.to_string()) .arg("--authrpc.port") diff --git a/testing/execution_engine_integration/src/main.rs b/testing/execution_engine_integration/src/main.rs index efb06833f6..d453c415d4 100644 --- a/testing/execution_engine_integration/src/main.rs +++ b/testing/execution_engine_integration/src/main.rs @@ -32,12 +32,12 @@ fn main() { fn test_geth() { let test_dir = build_utils::prepare_dir(); geth::build(&test_dir); - TestRig::new(GethEngine).perform_tests_blocking(); + TestRig::new(GethEngine, true).perform_tests_blocking(); geth::clean(&test_dir); } fn test_nethermind() { let test_dir = build_utils::prepare_dir(); nethermind::build(&test_dir); - TestRig::new(NethermindEngine).perform_tests_blocking(); + TestRig::new(NethermindEngine, false).perform_tests_blocking(); } diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index cf31c184fe..b0d115960c 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -2,7 +2,9 @@ use crate::execution_engine::{ ExecutionEngine, GenericExecutionEngine, ACCOUNT1, ACCOUNT2, KEYSTORE_PASSWORD, PRIVATE_KEYS, }; use crate::transactions::transactions; +use ethers_middleware::SignerMiddleware; use ethers_providers::Middleware; +use 
ethers_signers::LocalWallet; use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, @@ -44,6 +46,7 @@ pub struct TestRig { ee_b: ExecutionPair, spec: ChainSpec, _runtime_shutdown: async_channel::Sender<()>, + use_local_signing: bool, } /// Import a private key into the execution engine and unlock it so that we can @@ -104,7 +107,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: } impl TestRig { - pub fn new(generic_engine: Engine) -> Self { + pub fn new(generic_engine: Engine, use_local_signing: bool) -> Self { let runtime = Arc::new( tokio::runtime::Builder::new_multi_thread() .enable_all() @@ -166,6 +169,7 @@ impl TestRig { ee_b, spec, _runtime_shutdown: runtime_shutdown, + use_local_signing, } } @@ -197,15 +201,9 @@ impl TestRig { pub async fn perform_tests(&self) { self.wait_until_synced().await; - // Import and unlock all private keys to sign transactions - let _ = futures::future::join_all([&self.ee_a, &self.ee_b].iter().map(|ee| { - import_and_unlock( - ee.execution_engine.http_url(), - &PRIVATE_KEYS, - KEYSTORE_PASSWORD, - ) - })) - .await; + // Create a local signer in case we need to sign transactions locally + let wallet1: LocalWallet = PRIVATE_KEYS[0].parse().expect("Invalid private key"); + let signer = SignerMiddleware::new(&self.ee_a.execution_engine.provider, wallet1); // We hardcode the accounts here since some EEs start with a default unlocked account let account1 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT1).unwrap()); @@ -236,15 +234,38 @@ impl TestRig { // Submit transactions before getting payload let txs = transactions::(account1, account2); let mut pending_txs = Vec::new(); - for tx in txs.clone().into_iter() { - let pending_tx = self - .ee_a - .execution_engine - .provider - .send_transaction(tx, None) - .await - .unwrap(); - pending_txs.push(pending_tx); + + if 
self.use_local_signing { + // Sign locally with the Signer middleware + for (i, tx) in txs.clone().into_iter().enumerate() { + // The local signer uses eth_sendRawTransaction, so we need to manually set the nonce + let mut tx = tx.clone(); + tx.set_nonce(i as u64); + let pending_tx = signer.send_transaction(tx, None).await.unwrap(); + pending_txs.push(pending_tx); + } + } else { + // Sign on the EE + // Import and unlock all private keys to sign transactions on the EE + let _ = futures::future::join_all([&self.ee_a, &self.ee_b].iter().map(|ee| { + import_and_unlock( + ee.execution_engine.http_url(), + &PRIVATE_KEYS, + KEYSTORE_PASSWORD, + ) + })) + .await; + + for tx in txs.clone().into_iter() { + let pending_tx = self + .ee_a + .execution_engine + .provider + .send_transaction(tx, None) + .await + .unwrap(); + pending_txs.push(pending_tx); + } } /* diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 4cd599f845..6afc7771d4 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -15,7 +15,6 @@ use std::sync::Arc; use std::time::Duration; use environment::tracing_common; -use logging::MetricsLayer; use tracing_subscriber::prelude::*; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; @@ -90,12 +89,11 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { let ( env_builder, - _libp2p_discv5_layer, - file_logging_layer, - stdout_logging_layer, - _sse_logging_layer_opt, logger_config, - _dependency_log_filter, + stdout_logging_layer, + _file_logging_layer, + _sse_logging_layer_opt, + _libp2p_discv5_layer, ) = tracing_common::construct_logger( LoggerConfig { path: None, @@ -118,9 +116,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { ); if let Err(e) = tracing_subscriber::registry() - .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) .with(stdout_logging_layer.with_filter(logger_config.debug_level)) - 
.with(MetricsLayer) .try_init() { eprintln!("Failed to initialize dependency logging: {e}"); diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index 384699c64c..f4e0d20f38 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -5,7 +5,6 @@ use clap::ArgMatches; use crate::retry::with_retry; use environment::tracing_common; use futures::prelude::*; -use logging::MetricsLayer; use node_test_rig::{ environment::{EnvironmentBuilder, LoggerConfig}, testing_validator_config, ValidatorFiles, @@ -94,12 +93,11 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { let ( env_builder, - libp2p_discv5_layer, - file_logging_layer, - stdout_logging_layer, - _sse_logging_layer_opt, logger_config, - dependency_log_filter, + stdout_logging_layer, + _file_logging_layer, + _sse_logging_layer_opt, + _libp2p_discv5_layer, ) = tracing_common::construct_logger( LoggerConfig { path: None, @@ -122,11 +120,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { ); if let Err(e) = tracing_subscriber::registry() - .with(dependency_log_filter) - .with(file_logging_layer.with_filter(logger_config.logfile_debug_level)) .with(stdout_logging_layer.with_filter(logger_config.debug_level)) - .with(libp2p_discv5_layer) - .with(MetricsLayer) .try_init() { eprintln!("Failed to initialize dependency logging: {e}"); diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index 6559a2bb9e..13494e5fa6 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -92,7 +92,7 @@ fn keystore_pubkey(keystore: &Keystore) -> PublicKeyBytes { } fn all_with_status(count: usize, status: T) -> impl Iterator { - std::iter::repeat(status).take(count) + std::iter::repeat_n(status, count) } fn all_imported(count: usize) -> impl Iterator { @@ -1059,7 +1059,7 @@ async fn 
migrate_some_extra_slashing_protection() { /// - `first_vc_attestations`: attestations to sign on the first VC as `(validator_idx, att)` /// - `delete_indices`: validators to delete from the first VC /// - `slashing_protection_indices`: validators to transfer slashing protection data for. It should -/// be a subset of `delete_indices` or the test will panic. +/// be a subset of `delete_indices` or the test will panic. /// - `import_indices`: validators to transfer. It needn't be a subset of `delete_indices`. /// - `second_vc_attestations`: attestations to sign on the second VC after the transfer. The bool /// indicates whether the signing should be successful. diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 18bd736957..3dd138619b 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -67,7 +67,6 @@ pub struct ValidatorClient { #[clap( long, value_name = "SECRETS_DIRECTORY", - conflicts_with = "datadir", help = "The directory which contains the password to unlock the validator \ voting keypairs. Each password should be contained in a file where the \ name is the 0x-prefixed hex representation of the validators voting public \ diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index d59918657b..45e37e9276 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -264,9 +264,9 @@ impl ValidatorStore { /// are two primary functions used here: /// /// - `DoppelgangerStatus::only_safe`: only returns pubkeys which have passed doppelganger - /// protection and are safe-enough to sign messages. + /// protection and are safe-enough to sign messages. /// - `DoppelgangerStatus::ignored`: returns all the pubkeys from `only_safe` *plus* those still - /// undergoing protection. This is useful for collecting duties or other non-signing tasks. + /// undergoing protection. 
This is useful for collecting duties or other non-signing tasks. #[allow(clippy::needless_collect)] // Collect is required to avoid holding a lock. pub fn voting_pubkeys(&self, filter_func: F) -> I where